author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
commit | 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch) | |
tree | 848558de17fb3008cdf4d861b01ac7781903ce39 | /include/drm |
parent | Initial commit. (diff) | |
download | linux-upstream.tar.xz, linux-upstream.zip | |
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/drm')
121 files changed, 33560 insertions, 0 deletions
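The patch body below starts with include/drm/amd_asic_type.h, which carries only the enum amd_asic_type identifiers and an extern lookup table of ASIC names. For orientation, a minimal sketch of how a driver might consume that header; the helper name and the bounds check are illustrative and not part of the patch, and it assumes (as the amdgpu driver does) that amdgpu_asic_name[] is indexed by the enum values:

```c
/* Illustrative only -- not part of the patch. */
#include <drm/amd_asic_type.h>

static const char *example_asic_name(enum amd_asic_type type)
{
	/* Guard against values outside the name table. */
	if (type >= CHIP_LAST)
		return "unknown";
	return amdgpu_asic_name[type];
}
```

CHIP_LAST acts as a sentinel, so a bounds check like the one above stays valid as new ASICs are appended to the enum.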
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
new file mode 100644
index 000000000..90b69270f
--- /dev/null
+++ b/include/drm/amd_asic_type.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMD_ASIC_TYPE_H__
+#define __AMD_ASIC_TYPE_H__
+/*
+ * Supported ASIC types
+ */
+enum amd_asic_type {
+	CHIP_TAHITI = 0,
+	CHIP_PITCAIRN,	/* 1 */
+	CHIP_VERDE,	/* 2 */
+	CHIP_OLAND,	/* 3 */
+	CHIP_HAINAN,	/* 4 */
+	CHIP_BONAIRE,	/* 5 */
+	CHIP_KAVERI,	/* 6 */
+	CHIP_KABINI,	/* 7 */
+	CHIP_HAWAII,	/* 8 */
+	CHIP_MULLINS,	/* 9 */
+	CHIP_TOPAZ,	/* 10 */
+	CHIP_TONGA,	/* 11 */
+	CHIP_FIJI,	/* 12 */
+	CHIP_CARRIZO,	/* 13 */
+	CHIP_STONEY,	/* 14 */
+	CHIP_POLARIS10,	/* 15 */
+	CHIP_POLARIS11,	/* 16 */
+	CHIP_POLARIS12,	/* 17 */
+	CHIP_VEGAM,	/* 18 */
+	CHIP_VEGA10,	/* 19 */
+	CHIP_VEGA12,	/* 20 */
+	CHIP_VEGA20,	/* 21 */
+	CHIP_RAVEN,	/* 22 */
+	CHIP_ARCTURUS,	/* 23 */
+	CHIP_RENOIR,	/* 24 */
+	CHIP_ALDEBARAN,	/* 25 */
+	CHIP_NAVI10,	/* 26 */
+	CHIP_CYAN_SKILLFISH,	/* 27 */
+	CHIP_NAVI14,	/* 28 */
+	CHIP_NAVI12,	/* 29 */
+	CHIP_SIENNA_CICHLID,	/* 30 */
+	CHIP_NAVY_FLOUNDER,	/* 31 */
+	CHIP_VANGOGH,	/* 32 */
+	CHIP_DIMGREY_CAVEFISH,	/* 33 */
+	CHIP_BEIGE_GOBY,	/* 34 */
+	CHIP_YELLOW_CARP,	/* 35 */
+	CHIP_IP_DISCOVERY,	/* 36 */
+	CHIP_LAST,
+};
+
+extern const char *amdgpu_asic_name[];
+
+#endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
new file mode 100644
index 000000000..b0dcc0733
--- /dev/null
+++ b/include/drm/bridge/analogix_dp.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Analogix DP (Display Port) Core interface driver.
+ *
+ * Copyright (C) 2015 Rockchip Electronics Co., Ltd.
+ */
+#ifndef _ANALOGIX_DP_H_
+#define _ANALOGIX_DP_H_
+
+#include <drm/drm_crtc.h>
+
+struct analogix_dp_device;
+
+enum analogix_dp_devtype {
+	EXYNOS_DP,
+	RK3288_DP,
+	RK3399_EDP,
+};
+
+static inline bool is_rockchip(enum analogix_dp_devtype type)
+{
+	return type == RK3288_DP || type == RK3399_EDP;
+}
+
+struct analogix_dp_plat_data {
+	enum analogix_dp_devtype dev_type;
+	struct drm_panel *panel;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	bool skip_connector;
+
+	int (*power_on_start)(struct analogix_dp_plat_data *);
+	int (*power_on_end)(struct analogix_dp_plat_data *);
+	int (*power_off)(struct analogix_dp_plat_data *);
+	int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
+		      struct drm_connector *);
+	int (*get_modes)(struct analogix_dp_plat_data *,
+			 struct drm_connector *);
+};
+
+int analogix_dp_resume(struct analogix_dp_device *dp);
+int analogix_dp_suspend(struct analogix_dp_device *dp);
+
+struct analogix_dp_device *
+analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data);
+int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev);
+void analogix_dp_unbind(struct analogix_dp_device *dp);
+void analogix_dp_remove(struct analogix_dp_device *dp);
+
+int analogix_dp_start_crc(struct drm_connector *connector);
+int analogix_dp_stop_crc(struct drm_connector *connector);
+
+#endif /* _ANALOGIX_DP_H_ */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
new file mode 100644
index 000000000..f668e75fb
--- /dev/null
+++ b/include/drm/bridge/dw_hdmi.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __DW_HDMI__
+#define __DW_HDMI__
+
+#include <sound/hdmi-codec.h>
+
+struct drm_display_info;
+struct drm_display_mode;
+struct drm_encoder;
+struct dw_hdmi;
+struct platform_device;
+
+/**
+ * DOC: Supported input formats and encodings
+ *
+ * Depending on the Hardware configuration of the Controller IP, it supports
+ * a subset of the following input formats and encodings on its internal
+ * 48bit bus.
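Stepping back to include/drm/bridge/analogix_dp.h just above: the header is meant to be consumed by SoC glue (Exynos, Rockchip) that fills in struct analogix_dp_plat_data and then hands control to the core through analogix_dp_probe() and analogix_dp_bind(). A minimal sketch under that assumption, not part of the patch; the function name, field choices and error handling are illustrative only:

```c
/* Illustrative only -- not part of the patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <drm/bridge/analogix_dp.h>

static int example_analogix_bind(struct device *dev, struct drm_device *drm,
				 struct drm_encoder *encoder)
{
	struct analogix_dp_plat_data *pdata;
	struct analogix_dp_device *adp;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev_type = RK3288_DP;
	pdata->encoder = encoder;
	/* Optional hooks such as ->power_on_start may stay NULL. */

	adp = analogix_dp_probe(dev, pdata);
	if (IS_ERR(adp))
		return PTR_ERR(adp);

	return analogix_dp_bind(adp, drm);
}
```

The optional callbacks (power_on_start/end, power_off, attach, get_modes) exist so the platform glue can sequence regulators, panel setup and connector handling around the core's own bring-up.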
+ * + * +----------------------+----------------------------------+------------------------------+ + * | Format Name | Format Code | Encodings | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 8bit | ``MEDIA_BUS_FMT_RGB888_1X24`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 10bits | ``MEDIA_BUS_FMT_RGB101010_1X30`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 12bits | ``MEDIA_BUS_FMT_RGB121212_1X36`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 16bits | ``MEDIA_BUS_FMT_RGB161616_1X48`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 8bit | ``MEDIA_BUS_FMT_YUV8_1X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 10bits | ``MEDIA_BUS_FMT_YUV10_1X30`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 12bits | ``MEDIA_BUS_FMT_YUV12_1X36`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 16bits | ``MEDIA_BUS_FMT_YUV16_1X48`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 8bit | ``MEDIA_BUS_FMT_UYVY8_1X16`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 10bits | ``MEDIA_BUS_FMT_UYVY10_1X20`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 12bits | ``MEDIA_BUS_FMT_UYVY12_1X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 8bit | ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 10bits | ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``| ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 12bits | ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``| ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 16bits | ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``| ``V4L2_YCBCR_ENC_601`` | + * | | | 
or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + */ + +enum { + DW_HDMI_RES_8, + DW_HDMI_RES_10, + DW_HDMI_RES_12, + DW_HDMI_RES_MAX, +}; + +enum dw_hdmi_phy_type { + DW_HDMI_PHY_DWC_HDMI_TX_PHY = 0x00, + DW_HDMI_PHY_DWC_MHL_PHY_HEAC = 0xb2, + DW_HDMI_PHY_DWC_MHL_PHY = 0xc2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC = 0xe2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY = 0xf2, + DW_HDMI_PHY_DWC_HDMI20_TX_PHY = 0xf3, + DW_HDMI_PHY_VENDOR_PHY = 0xfe, +}; + +struct dw_hdmi_mpll_config { + unsigned long mpixelclock; + struct { + u16 cpce; + u16 gmp; + } res[DW_HDMI_RES_MAX]; +}; + +struct dw_hdmi_curr_ctrl { + unsigned long mpixelclock; + u16 curr[DW_HDMI_RES_MAX]; +}; + +struct dw_hdmi_phy_config { + unsigned long mpixelclock; + u16 sym_ctr; /*clock symbol and transmitter control*/ + u16 term; /*transmission termination value*/ + u16 vlev_ctr; /* voltage level control */ +}; + +struct dw_hdmi_phy_ops { + int (*init)(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *display, + const struct drm_display_mode *mode); + void (*disable)(struct dw_hdmi *hdmi, void *data); + enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data); + void (*update_hpd)(struct dw_hdmi *hdmi, void *data, + bool force, bool disabled, bool rxsense); + void (*setup_hpd)(struct dw_hdmi *hdmi, void *data); +}; + +struct dw_hdmi_plat_data { + struct regmap *regm; + + unsigned int output_port; + + unsigned long input_bus_encoding; + bool use_drm_infoframe; + bool ycbcr_420_allowed; + + /* + * Private data passed to all the .mode_valid() and .configure_phy() + * callback functions. + */ + void *priv_data; + + /* Platform-specific mode validation (optional). */ + enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *info, + const struct drm_display_mode *mode); + + /* Platform-specific audio enable/disable (optional) */ + void (*enable_audio)(struct dw_hdmi *hdmi, int channel, + int width, int rate, int non_pcm); + void (*disable_audio)(struct dw_hdmi *hdmi); + + /* Vendor PHY support */ + const struct dw_hdmi_phy_ops *phy_ops; + const char *phy_name; + void *phy_data; + unsigned int phy_force_vendor; + + /* Synopsys PHY support */ + const struct dw_hdmi_mpll_config *mpll_cfg; + const struct dw_hdmi_curr_ctrl *cur_ctr; + const struct dw_hdmi_phy_config *phy_config; + int (*configure_phy)(struct dw_hdmi *hdmi, void *data, + unsigned long mpixelclock); + + unsigned int disable_cec : 1; +}; + +struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, + const struct dw_hdmi_plat_data *plat_data); +void dw_hdmi_remove(struct dw_hdmi *hdmi); +void dw_hdmi_unbind(struct dw_hdmi *hdmi); +struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_hdmi_plat_data *plat_data); + +void dw_hdmi_resume(struct dw_hdmi *hdmi); + +void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); + +int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, + struct device *codec_dev); +void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm); +void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width); +void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); +void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); +void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status); +void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca); +void 
dw_hdmi_audio_enable(struct dw_hdmi *hdmi); +void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); +void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi, + const struct drm_display_info *display); + +/* PHY configuration */ +void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address); +void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, + unsigned char addr); + +void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi); + +void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable); +void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable); +void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi); + +enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi, + void *data); +void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data, + bool force, bool disabled, bool rxsense); +void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data); + +#endif /* __IMX_HDMI_H__ */ diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h new file mode 100644 index 000000000..5286a53a1 --- /dev/null +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) STMicroelectronics SA 2017 + * + * Authors: Philippe Cornu <philippe.cornu@st.com> + * Yannick Fertre <yannick.fertre@st.com> + */ + +#ifndef __DW_MIPI_DSI__ +#define __DW_MIPI_DSI__ + +#include <linux/types.h> + +#include <drm/drm_modes.h> + +struct drm_display_mode; +struct drm_encoder; +struct dw_mipi_dsi; +struct mipi_dsi_device; +struct platform_device; + +struct dw_mipi_dsi_dphy_timing { + u16 data_hs2lp; + u16 data_lp2hs; + u16 clk_hs2lp; + u16 clk_lp2hs; +}; + +struct dw_mipi_dsi_phy_ops { + int (*init)(void *priv_data); + void (*power_on)(void *priv_data); + void (*power_off)(void *priv_data); + int (*get_lane_mbps)(void *priv_data, + const struct drm_display_mode *mode, + unsigned long mode_flags, u32 lanes, u32 format, + unsigned int *lane_mbps); + int (*get_timing)(void *priv_data, unsigned int lane_mbps, + struct dw_mipi_dsi_dphy_timing *timing); + int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate); +}; + +struct dw_mipi_dsi_host_ops { + int (*attach)(void *priv_data, + struct mipi_dsi_device *dsi); + int (*detach)(void *priv_data, + struct mipi_dsi_device *dsi); +}; + +struct dw_mipi_dsi_plat_data { + void __iomem *base; + unsigned int max_data_lanes; + + enum drm_mode_status (*mode_valid)(void *priv_data, + const struct drm_display_mode *mode, + unsigned long mode_flags, + u32 lanes, u32 format); + + const struct dw_mipi_dsi_phy_ops *phy_ops; + const struct dw_mipi_dsi_host_ops *host_ops; + + void *priv_data; +}; + +struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev, + const struct dw_mipi_dsi_plat_data + *plat_data); +void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi); +int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder); +void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi); +void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave); + +#endif /* __DW_MIPI_DSI__ */ diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h new file mode 100644 index 000000000..d96626a0e --- /dev/null +++ b/include/drm/bridge/mhl.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Defines for Mobile High-Definition Link (MHL) interface + * + * Copyright (C) 2015, Samsung Electronics, Co., Ltd. + * Andrzej Hajda <a.hajda@samsung.com> + * + * Based on MHL driver for Android devices. + * Copyright (C) 2013-2014 Silicon Image, Inc. 
+ */ + +#ifndef __MHL_H__ +#define __MHL_H__ + +#include <linux/types.h> + +/* Device Capabilities Registers */ +enum { + MHL_DCAP_DEV_STATE, + MHL_DCAP_MHL_VERSION, + MHL_DCAP_CAT, + MHL_DCAP_ADOPTER_ID_H, + MHL_DCAP_ADOPTER_ID_L, + MHL_DCAP_VID_LINK_MODE, + MHL_DCAP_AUD_LINK_MODE, + MHL_DCAP_VIDEO_TYPE, + MHL_DCAP_LOG_DEV_MAP, + MHL_DCAP_BANDWIDTH, + MHL_DCAP_FEATURE_FLAG, + MHL_DCAP_DEVICE_ID_H, + MHL_DCAP_DEVICE_ID_L, + MHL_DCAP_SCRATCHPAD_SIZE, + MHL_DCAP_INT_STAT_SIZE, + MHL_DCAP_RESERVED, + MHL_DCAP_SIZE +}; + +#define MHL_DCAP_CAT_SINK 0x01 +#define MHL_DCAP_CAT_SOURCE 0x02 +#define MHL_DCAP_CAT_POWER 0x10 +#define MHL_DCAP_CAT_PLIM(x) ((x) << 5) + +#define MHL_DCAP_VID_LINK_RGB444 0x01 +#define MHL_DCAP_VID_LINK_YCBCR444 0x02 +#define MHL_DCAP_VID_LINK_YCBCR422 0x04 +#define MHL_DCAP_VID_LINK_PPIXEL 0x08 +#define MHL_DCAP_VID_LINK_ISLANDS 0x10 +#define MHL_DCAP_VID_LINK_VGA 0x20 +#define MHL_DCAP_VID_LINK_16BPP 0x40 + +#define MHL_DCAP_AUD_LINK_2CH 0x01 +#define MHL_DCAP_AUD_LINK_8CH 0x02 + +#define MHL_DCAP_VT_GRAPHICS 0x00 +#define MHL_DCAP_VT_PHOTO 0x02 +#define MHL_DCAP_VT_CINEMA 0x04 +#define MHL_DCAP_VT_GAMES 0x08 +#define MHL_DCAP_SUPP_VT 0x80 + +#define MHL_DCAP_LD_DISPLAY 0x01 +#define MHL_DCAP_LD_VIDEO 0x02 +#define MHL_DCAP_LD_AUDIO 0x04 +#define MHL_DCAP_LD_MEDIA 0x08 +#define MHL_DCAP_LD_TUNER 0x10 +#define MHL_DCAP_LD_RECORD 0x20 +#define MHL_DCAP_LD_SPEAKER 0x40 +#define MHL_DCAP_LD_GUI 0x80 +#define MHL_DCAP_LD_ALL 0xFF + +#define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01 +#define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02 +#define MHL_DCAP_FEATURE_SP_SUPPORT 0x04 +#define MHL_DCAP_FEATURE_UCP_SEND_SUPPOR 0x08 +#define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10 +#define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40 + +/* Extended Device Capabilities Registers */ +enum { + MHL_XDC_ECBUS_SPEEDS, + MHL_XDC_TMDS_SPEEDS, + MHL_XDC_ECBUS_ROLES, + MHL_XDC_LOG_DEV_MAPX, + MHL_XDC_SIZE +}; + +#define MHL_XDC_ECBUS_S_075 0x01 +#define MHL_XDC_ECBUS_S_8BIT 0x02 +#define MHL_XDC_ECBUS_S_12BIT 0x04 +#define MHL_XDC_ECBUS_D_150 0x10 +#define MHL_XDC_ECBUS_D_8BIT 0x20 + +#define MHL_XDC_TMDS_000 0x00 +#define MHL_XDC_TMDS_150 0x01 +#define MHL_XDC_TMDS_300 0x02 +#define MHL_XDC_TMDS_600 0x04 + +/* MHL_XDC_ECBUS_ROLES flags */ +#define MHL_XDC_DEV_HOST 0x01 +#define MHL_XDC_DEV_DEVICE 0x02 +#define MHL_XDC_DEV_CHARGER 0x04 +#define MHL_XDC_HID_HOST 0x08 +#define MHL_XDC_HID_DEVICE 0x10 + +/* MHL_XDC_LOG_DEV_MAPX flags */ +#define MHL_XDC_LD_PHONE 0x01 + +/* Device Status Registers */ +enum { + MHL_DST_CONNECTED_RDY, + MHL_DST_LINK_MODE, + MHL_DST_VERSION, + MHL_DST_SIZE +}; + +/* Offset of DEVSTAT registers */ +#define MHL_DST_OFFSET 0x30 +#define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name) + +#define MHL_DST_CONN_DCAP_RDY 0x01 +#define MHL_DST_CONN_XDEVCAPP_SUPP 0x02 +#define MHL_DST_CONN_POW_STAT 0x04 +#define MHL_DST_CONN_PLIM_STAT_MASK 0x38 + +#define MHL_DST_LM_CLK_MODE_MASK 0x07 +#define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02 +#define MHL_DST_LM_CLK_MODE_NORMAL 0x03 +#define MHL_DST_LM_PATH_EN_MASK 0x08 +#define MHL_DST_LM_PATH_ENABLED 0x08 +#define MHL_DST_LM_PATH_DISABLED 0x00 +#define MHL_DST_LM_MUTED_MASK 0x10 + +/* Extended Device Status Registers */ +enum { + MHL_XDS_CURR_ECBUS_MODE, + MHL_XDS_AVLINK_MODE_STATUS, + MHL_XDS_AVLINK_MODE_CONTROL, + MHL_XDS_MULTI_SINK_STATUS, + MHL_XDS_SIZE +}; + +/* Offset of XDEVSTAT registers */ +#define MHL_XDS_OFFSET 0x90 +#define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name) + +/* MHL_XDS_REG_CURR_ECBUS_MODE flags */ +#define MHL_XDS_SLOT_MODE_8BIT 0x00 
+#define MHL_XDS_SLOT_MODE_6BIT 0x01 +#define MHL_XDS_ECBUS_S 0x04 +#define MHL_XDS_ECBUS_D 0x08 + +#define MHL_XDS_LINK_CLOCK_75MHZ 0x00 +#define MHL_XDS_LINK_CLOCK_150MHZ 0x10 +#define MHL_XDS_LINK_CLOCK_300MHZ 0x20 +#define MHL_XDS_LINK_CLOCK_600MHZ 0x30 + +#define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00 +#define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01 +#define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02 +#define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03 + +#define MHL_XDS_LINK_RATE_1_5_GBPS 0x00 +#define MHL_XDS_LINK_RATE_3_0_GBPS 0x01 +#define MHL_XDS_LINK_RATE_6_0_GBPS 0x02 +#define MHL_XDS_ATT_CAPABLE 0x08 + +#define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01 +#define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04 +#define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10 +#define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40 + +/* Interrupt Registers */ +enum { + MHL_INT_RCHANGE, + MHL_INT_DCHANGE, + MHL_INT_SIZE +}; + +/* Offset of DEVSTAT registers */ +#define MHL_INT_OFFSET 0x20 +#define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name) + +#define MHL_INT_RC_DCAP_CHG 0x01 +#define MHL_INT_RC_DSCR_CHG 0x02 +#define MHL_INT_RC_REQ_WRT 0x04 +#define MHL_INT_RC_GRT_WRT 0x08 +#define MHL_INT_RC_3D_REQ 0x10 +#define MHL_INT_RC_FEAT_REQ 0x20 +#define MHL_INT_RC_FEAT_COMPLETE 0x40 + +#define MHL_INT_DC_EDID_CHG 0x02 + +enum { + MHL_ACK = 0x33, /* Command or Data byte acknowledge */ + MHL_NACK = 0x34, /* Command or Data byte not acknowledge */ + MHL_ABORT = 0x35, /* Transaction abort */ + MHL_WRITE_STAT = 0xe0, /* Write one status register */ + MHL_SET_INT = 0x60, /* Write one interrupt register */ + MHL_READ_DEVCAP_REG = 0x61, /* Read one register */ + MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */ + MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */ + MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */ + MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */ + MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */ + MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */ + MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */ + MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */ + MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */ + MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */ + MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */ + MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */ + MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */ + MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */ + /* let the rest of these float, they are software specific */ + MHL_READ_EDID_BLOCK, + MHL_SEND_3D_REQ_OR_FEAT_REQ, + MHL_READ_DEVCAP, + MHL_READ_XDEVCAP +}; + +/* MSC message types */ +enum { + MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */ + MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */ + MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */ + MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */ + MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */ + MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */ + MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */ + MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */ + MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */ + MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */ + MHL_MSC_MSG_UCPE = 0x32, 
/* UCP Error sub-command */ + MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */ + MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */ + MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */ + MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */ + MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */ + MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */ + MHL_MSC_MSG_BIST_TRIGGER = 0x60, + MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61, + MHL_MSC_MSG_BIST_READY = 0x62, + MHL_MSC_MSG_BIST_STOP = 0x63, +}; + +/* RAP action codes */ +#define MHL_RAP_POLL 0x00 /* Just do an ack */ +#define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */ +#define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */ +#define MHL_RAP_CBUS_MODE_DOWN 0x20 +#define MHL_RAP_CBUS_MODE_UP 0x21 + +/* RAPK status codes */ +#define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */ +#define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */ +#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */ +#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */ + +/* Bit masks for RCP messages */ +#define MHL_RCP_KEY_RELEASED_MASK 0x80 +#define MHL_RCP_KEY_ID_MASK 0x7F + +/* + * Error status codes for RCPE messages + */ +/* No error. (Not allowed in RCPE messages) */ +#define MHL_RCPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized key code */ +#define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 +/* Responder busy. Initiator may retry message */ +#define MHL_RCPE_STATUS_BUSY 0x02 + +/* + * Error status codes for RBPE messages + */ +/* No error. (Not allowed in RBPE messages) */ +#define MHL_RBPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized button code */ +#define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01 +/* Responder busy. Initiator may retry message */ +#define MHL_RBPE_STATUS_BUSY 0x02 + +/* + * Error status codes for UCPE messages + */ +/* No error. 
(Not allowed in UCPE messages) */ +#define MHL_UCPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized key code */ +#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 + +enum mhl_burst_id { + MHL_BURST_ID_3D_VIC = 0x10, + MHL_BURST_ID_3D_DTD = 0x11, + MHL_BURST_ID_HEV_VIC = 0x20, + MHL_BURST_ID_HEV_DTDA = 0x21, + MHL_BURST_ID_HEV_DTDB = 0x22, + MHL_BURST_ID_VC_ASSIGN = 0x38, + MHL_BURST_ID_VC_CONFIRM = 0x39, + MHL_BURST_ID_AUD_DELAY = 0x40, + MHL_BURST_ID_ADT_BURSTID = 0x41, + MHL_BURST_ID_BIST_SETUP = 0x51, + MHL_BURST_ID_BIST_RETURN_STAT = 0x52, + MHL_BURST_ID_EMSC_SUPPORT = 0x61, + MHL_BURST_ID_HID_PAYLOAD = 0x62, + MHL_BURST_ID_BLK_RCV_BUFFER_INFO = 0x63, + MHL_BURST_ID_BITS_PER_PIXEL_FMT = 0x64, +}; + +struct mhl_burst_blk_rcv_buffer_info { + __be16 id; + __le16 size; +} __packed; + +struct mhl3_burst_header { + __be16 id; + u8 checksum; + u8 total_entries; + u8 sequence_index; +} __packed; + +struct mhl_burst_bits_per_pixel_fmt { + struct mhl3_burst_header hdr; + u8 num_entries; + struct { + u8 stream_id; + u8 pixel_format; + } __packed desc[]; +} __packed; + +struct mhl_burst_emsc_support { + struct mhl3_burst_header hdr; + u8 num_entries; + __be16 burst_id[]; +} __packed; + +struct mhl_burst_audio_descr { + struct mhl3_burst_header hdr; + u8 flags; + u8 short_desc[9]; +} __packed; + +/* + * MHL3 infoframe related definitions + */ + +#define MHL3_IEEE_OUI 0x7ca61d +#define MHL3_INFOFRAME_SIZE 15 + +enum mhl3_video_format { + MHL3_VIDEO_FORMAT_NONE, + MHL3_VIDEO_FORMAT_3D, + MHL3_VIDEO_FORMAT_MULTI_VIEW, + MHL3_VIDEO_FORMAT_DUAL_3D +}; + +enum mhl3_3d_format_type { + MHL3_3D_FORMAT_TYPE_FS, /* frame sequential */ + MHL3_3D_FORMAT_TYPE_TB, /* top-bottom */ + MHL3_3D_FORMAT_TYPE_LR, /* left-right */ + MHL3_3D_FORMAT_TYPE_FS_TB, /* frame sequential, top-bottom */ + MHL3_3D_FORMAT_TYPE_FS_LR, /* frame sequential, left-right */ + MHL3_3D_FORMAT_TYPE_TB_LR /* top-bottom, left-right */ +}; + +struct mhl3_infoframe { + unsigned char version; + enum mhl3_video_format video_format; + enum mhl3_3d_format_type format_type; + bool sep_audio; + int hev_format; + int av_delay; +}; + +#endif /* __MHL_H__ */ diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h new file mode 100644 index 000000000..b235d6833 --- /dev/null +++ b/include/drm/display/drm_dp.h @@ -0,0 +1,1699 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef _DRM_DP_H_ +#define _DRM_DP_H_ + +#include <linux/types.h> + +/* + * Unless otherwise noted, all values are from the DP 1.1a spec. Note that + * DP and DPCD versions are independent. Differences from 1.0 are not noted, + * 1.0 devices basically don't exist in the wild. + * + * Abbreviations, in chronological order: + * + * eDP: Embedded DisplayPort version 1 + * DPI: DisplayPort Interoperability Guideline v1.1a + * 1.2: DisplayPort 1.2 + * MST: Multistream Transport - part of DP 1.2a + * + * 1.2 formally includes both eDP and DPI definitions. + */ + +/* MSA (Main Stream Attribute) MISC bits (as MISC1<<8|MISC0) */ +#define DP_MSA_MISC_SYNC_CLOCK (1 << 0) +#define DP_MSA_MISC_INTERLACE_VTOTAL_EVEN (1 << 8) +#define DP_MSA_MISC_STEREO_NO_3D (0 << 9) +#define DP_MSA_MISC_STEREO_PROG_RIGHT_EYE (1 << 9) +#define DP_MSA_MISC_STEREO_PROG_LEFT_EYE (3 << 9) +/* bits per component for non-RAW */ +#define DP_MSA_MISC_6_BPC (0 << 5) +#define DP_MSA_MISC_8_BPC (1 << 5) +#define DP_MSA_MISC_10_BPC (2 << 5) +#define DP_MSA_MISC_12_BPC (3 << 5) +#define DP_MSA_MISC_16_BPC (4 << 5) +/* bits per component for RAW */ +#define DP_MSA_MISC_RAW_6_BPC (1 << 5) +#define DP_MSA_MISC_RAW_7_BPC (2 << 5) +#define DP_MSA_MISC_RAW_8_BPC (3 << 5) +#define DP_MSA_MISC_RAW_10_BPC (4 << 5) +#define DP_MSA_MISC_RAW_12_BPC (5 << 5) +#define DP_MSA_MISC_RAW_14_BPC (6 << 5) +#define DP_MSA_MISC_RAW_16_BPC (7 << 5) +/* pixel encoding/colorimetry format */ +#define _DP_MSA_MISC_COLOR(misc1_7, misc0_21, misc0_3, misc0_4) \ + ((misc1_7) << 15 | (misc0_4) << 4 | (misc0_3) << 3 | ((misc0_21) << 1)) +#define DP_MSA_MISC_COLOR_RGB _DP_MSA_MISC_COLOR(0, 0, 0, 0) +#define DP_MSA_MISC_COLOR_CEA_RGB _DP_MSA_MISC_COLOR(0, 0, 1, 0) +#define DP_MSA_MISC_COLOR_RGB_WIDE_FIXED _DP_MSA_MISC_COLOR(0, 3, 0, 0) +#define DP_MSA_MISC_COLOR_RGB_WIDE_FLOAT _DP_MSA_MISC_COLOR(0, 3, 0, 1) +#define DP_MSA_MISC_COLOR_Y_ONLY _DP_MSA_MISC_COLOR(1, 0, 0, 0) +#define DP_MSA_MISC_COLOR_RAW _DP_MSA_MISC_COLOR(1, 1, 0, 0) +#define DP_MSA_MISC_COLOR_YCBCR_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 1, 0) +#define DP_MSA_MISC_COLOR_YCBCR_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 1, 1) +#define DP_MSA_MISC_COLOR_YCBCR_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 1, 0) +#define DP_MSA_MISC_COLOR_YCBCR_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 1, 1) +#define DP_MSA_MISC_COLOR_XVYCC_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 0, 0) +#define DP_MSA_MISC_COLOR_XVYCC_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 0, 1) +#define DP_MSA_MISC_COLOR_XVYCC_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 0, 0) +#define DP_MSA_MISC_COLOR_XVYCC_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 0, 1) +#define DP_MSA_MISC_COLOR_OPRGB _DP_MSA_MISC_COLOR(0, 0, 1, 1) +#define DP_MSA_MISC_COLOR_DCI_P3 _DP_MSA_MISC_COLOR(0, 3, 1, 0) +#define DP_MSA_MISC_COLOR_COLOR_PROFILE _DP_MSA_MISC_COLOR(0, 3, 1, 1) +#define DP_MSA_MISC_COLOR_VSC_SDP (1 << 14) + +#define DP_AUX_MAX_PAYLOAD_BYTES 16 + +#define DP_AUX_I2C_WRITE 0x0 +#define DP_AUX_I2C_READ 0x1 +#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2 +#define DP_AUX_I2C_MOT 0x4 +#define DP_AUX_NATIVE_WRITE 0x8 +#define DP_AUX_NATIVE_READ 0x9 + +#define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0) +#define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0) +#define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0) +#define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0) + +#define DP_AUX_I2C_REPLY_ACK (0x0 << 2) +#define DP_AUX_I2C_REPLY_NACK (0x1 << 2) +#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2) +#define DP_AUX_I2C_REPLY_MASK (0x3 << 2) + +/* DPCD Field Address Mapping */ + +/* Receiver Capability */ +#define DP_DPCD_REV 0x000 +# define DP_DPCD_REV_10 0x10 +# 
define DP_DPCD_REV_11 0x11 +# define DP_DPCD_REV_12 0x12 +# define DP_DPCD_REV_13 0x13 +# define DP_DPCD_REV_14 0x14 + +#define DP_MAX_LINK_RATE 0x001 + +#define DP_MAX_LANE_COUNT 0x002 +# define DP_MAX_LANE_COUNT_MASK 0x1f +# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ +# define DP_ENHANCED_FRAME_CAP (1 << 7) + +#define DP_MAX_DOWNSPREAD 0x003 +# define DP_MAX_DOWNSPREAD_0_5 (1 << 0) +# define DP_STREAM_REGENERATION_STATUS_CAP (1 << 1) /* 2.0 */ +# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) +# define DP_TPS4_SUPPORTED (1 << 7) + +#define DP_NORP 0x004 + +#define DP_DOWNSTREAMPORT_PRESENT 0x005 +# define DP_DWN_STRM_PORT_PRESENT (1 << 0) +# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 +# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1) +# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1) +# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1) +# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1) +# define DP_FORMAT_CONVERSION (1 << 3) +# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ + +#define DP_MAIN_LINK_CHANNEL_CODING 0x006 +# define DP_CAP_ANSI_8B10B (1 << 0) +# define DP_CAP_ANSI_128B132B (1 << 1) /* 2.0 */ + +#define DP_DOWN_STREAM_PORT_COUNT 0x007 +# define DP_PORT_COUNT_MASK 0x0f +# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ +# define DP_OUI_SUPPORT (1 << 7) + +#define DP_RECEIVE_PORT_0_CAP_0 0x008 +# define DP_LOCAL_EDID_PRESENT (1 << 1) +# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) + +#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 + +#define DP_RECEIVE_PORT_1_CAP_0 0x00a +#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b + +#define DP_I2C_SPEED_CAP 0x00c /* DPI */ +# define DP_I2C_SPEED_1K 0x01 +# define DP_I2C_SPEED_5K 0x02 +# define DP_I2C_SPEED_10K 0x04 +# define DP_I2C_SPEED_100K 0x08 +# define DP_I2C_SPEED_400K 0x10 +# define DP_I2C_SPEED_1M 0x20 + +#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ +# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0) +# define DP_FRAMING_CHANGE_CAP (1 << 1) +# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ + +#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? 
*/ +# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */ +# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */ + +#define DP_ADAPTER_CAP 0x00f /* 1.2 */ +# define DP_FORCE_LOAD_SENSE_CAP (1 << 0) +# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1) + +#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */ +# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */ + +/* Multiple stream transport */ +#define DP_FAUX_CAP 0x020 /* 1.2 */ +# define DP_FAUX_CAP_1 (1 << 0) + +#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 /* 2.0 */ +# define DP_FALLBACK_1024x768_60HZ_24BPP (1 << 0) +# define DP_FALLBACK_1280x720_60HZ_24BPP (1 << 1) +# define DP_FALLBACK_1920x1080_60HZ_24BPP (1 << 2) + +#define DP_MSTM_CAP 0x021 /* 1.2 */ +# define DP_MST_CAP (1 << 0) +# define DP_SINGLE_STREAM_SIDEBAND_MSG (1 << 1) /* 2.0 */ + +#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */ + +/* AV_SYNC_DATA_BLOCK 1.2 */ +#define DP_AV_GRANULARITY 0x023 +# define DP_AG_FACTOR_MASK (0xf << 0) +# define DP_AG_FACTOR_3MS (0 << 0) +# define DP_AG_FACTOR_2MS (1 << 0) +# define DP_AG_FACTOR_1MS (2 << 0) +# define DP_AG_FACTOR_500US (3 << 0) +# define DP_AG_FACTOR_200US (4 << 0) +# define DP_AG_FACTOR_100US (5 << 0) +# define DP_AG_FACTOR_10US (6 << 0) +# define DP_AG_FACTOR_1US (7 << 0) +# define DP_VG_FACTOR_MASK (0xf << 4) +# define DP_VG_FACTOR_3MS (0 << 4) +# define DP_VG_FACTOR_2MS (1 << 4) +# define DP_VG_FACTOR_1MS (2 << 4) +# define DP_VG_FACTOR_500US (3 << 4) +# define DP_VG_FACTOR_200US (4 << 4) +# define DP_VG_FACTOR_100US (5 << 4) + +#define DP_AUD_DEC_LAT0 0x024 +#define DP_AUD_DEC_LAT1 0x025 + +#define DP_AUD_PP_LAT0 0x026 +#define DP_AUD_PP_LAT1 0x027 + +#define DP_VID_INTER_LAT 0x028 + +#define DP_VID_PROG_LAT 0x029 + +#define DP_REP_LAT 0x02a + +#define DP_AUD_DEL_INS0 0x02b +#define DP_AUD_DEL_INS1 0x02c +#define DP_AUD_DEL_INS2 0x02d +/* End of AV_SYNC_DATA_BLOCK */ + +#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */ +# define DP_ALPM_CAP (1 << 0) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_CAP (1 << 0) + +#define DP_GUID 0x030 /* 1.2 */ + +#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */ +# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0) +# define DP_DSC_PASSTHROUGH_IS_SUPPORTED (1 << 1) +# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_COMP_TO_COMP (1 << 2) +# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_UNCOMP_TO_COMP (1 << 3) + +#define DP_DSC_REV 0x061 +# define DP_DSC_MAJOR_MASK (0xf << 0) +# define DP_DSC_MINOR_MASK (0xf << 4) +# define DP_DSC_MAJOR_SHIFT 0 +# define DP_DSC_MINOR_SHIFT 4 + +#define DP_DSC_RC_BUF_BLK_SIZE 0x062 +# define DP_DSC_RC_BUF_BLK_SIZE_1 0x0 +# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1 +# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2 +# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3 + +#define DP_DSC_RC_BUF_SIZE 0x063 + +#define DP_DSC_SLICE_CAP_1 0x064 +# define DP_DSC_1_PER_DP_DSC_SINK (1 << 0) +# define DP_DSC_2_PER_DP_DSC_SINK (1 << 1) +# define DP_DSC_4_PER_DP_DSC_SINK (1 << 3) +# define DP_DSC_6_PER_DP_DSC_SINK (1 << 4) +# define DP_DSC_8_PER_DP_DSC_SINK (1 << 5) +# define DP_DSC_10_PER_DP_DSC_SINK (1 << 6) +# define DP_DSC_12_PER_DP_DSC_SINK (1 << 7) + +#define DP_DSC_LINE_BUF_BIT_DEPTH 0x065 +# define DP_DSC_LINE_BUF_BIT_DEPTH_MASK (0xf << 0) +# define DP_DSC_LINE_BUF_BIT_DEPTH_9 0x0 +# define DP_DSC_LINE_BUF_BIT_DEPTH_10 0x1 +# define DP_DSC_LINE_BUF_BIT_DEPTH_11 0x2 +# define DP_DSC_LINE_BUF_BIT_DEPTH_12 0x3 +# define DP_DSC_LINE_BUF_BIT_DEPTH_13 0x4 +# define DP_DSC_LINE_BUF_BIT_DEPTH_14 0x5 +# define DP_DSC_LINE_BUF_BIT_DEPTH_15 0x6 +# define 
DP_DSC_LINE_BUF_BIT_DEPTH_16 0x7 +# define DP_DSC_LINE_BUF_BIT_DEPTH_8 0x8 + +#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066 +# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0) +# define DP_DSC_RGB_COLOR_CONV_BYPASS_SUPPORT (1 << 1) + +#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */ + +#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */ +# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0) +# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK (0x3 << 5) /* eDP 1.5 & DP 2.0 */ +# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY (1 << 7) /* eDP 1.5 & DP 2.0 */ + +#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069 +# define DP_DSC_RGB (1 << 0) +# define DP_DSC_YCbCr444 (1 << 1) +# define DP_DSC_YCbCr422_Simple (1 << 2) +# define DP_DSC_YCbCr422_Native (1 << 3) +# define DP_DSC_YCbCr420_Native (1 << 4) + +#define DP_DSC_DEC_COLOR_DEPTH_CAP 0x06A +# define DP_DSC_8_BPC (1 << 1) +# define DP_DSC_10_BPC (1 << 2) +# define DP_DSC_12_BPC (1 << 3) + +#define DP_DSC_PEAK_THROUGHPUT 0x06B +# define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0) +# define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0 +# define DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED 0 +# define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_500 (4 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_550 (5 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_600 (6 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_650 (7 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_700 (8 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_750 (9 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_800 (10 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_850 (11 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0) /* 1.4a */ +# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4) +# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4 +# define DP_DSC_THROUGHPUT_MODE_1_UNSUPPORTED 0 +# define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_500 (4 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_550 (5 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_600 (6 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_650 (7 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_700 (8 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_750 (9 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_800 (10 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_850 (11 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_170 (15 << 4) + +#define DP_DSC_MAX_SLICE_WIDTH 0x06C +#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560 +#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320 + +#define DP_DSC_SLICE_CAP_2 0x06D +# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0) +# define DP_DSC_20_PER_DP_DSC_SINK (1 << 1) +# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2) + +#define DP_DSC_BITS_PER_PIXEL_INC 0x06F +# define DP_DSC_RGB_YCbCr444_MAX_BPP_DELTA_MASK 0x1f +# define DP_DSC_RGB_YCbCr420_MAX_BPP_DELTA_MASK 0xe0 +# define DP_DSC_BITS_PER_PIXEL_1_16 0x0 +# define DP_DSC_BITS_PER_PIXEL_1_8 0x1 +# define DP_DSC_BITS_PER_PIXEL_1_4 0x2 +# define DP_DSC_BITS_PER_PIXEL_1_2 0x3 +# define DP_DSC_BITS_PER_PIXEL_1_1 0x4 + +#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? 
*/ +# define DP_PSR_IS_SUPPORTED 1 +# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */ +# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */ +# define DP_PSR2_WITH_Y_COORD_ET_SUPPORTED 4 /* eDP 1.5, adopted eDP 1.4b SCR */ + +#define DP_PSR_CAPS 0x071 /* XXX 1.2? */ +# define DP_PSR_NO_TRAIN_ON_EXIT 1 +# define DP_PSR_SETUP_TIME_330 (0 << 1) +# define DP_PSR_SETUP_TIME_275 (1 << 1) +# define DP_PSR_SETUP_TIME_220 (2 << 1) +# define DP_PSR_SETUP_TIME_165 (3 << 1) +# define DP_PSR_SETUP_TIME_110 (4 << 1) +# define DP_PSR_SETUP_TIME_55 (5 << 1) +# define DP_PSR_SETUP_TIME_0 (6 << 1) +# define DP_PSR_SETUP_TIME_MASK (7 << 1) +# define DP_PSR_SETUP_TIME_SHIFT 1 +# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */ +# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */ +# define DP_PSR2_SU_AUX_FRAME_SYNC_NOT_NEEDED (1 << 6)/* eDP 1.5, adopted eDP 1.4b SCR */ + +#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */ +#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */ + +/* + * 0x80-0x8f describe downstream port capabilities, but there are two layouts + * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not, + * each port's descriptor is one byte wide. If it was set, each port's is + * four bytes wide, starting with the one byte from the base info. As of + * DP interop v1.1a only VGA defines additional detail. + */ + +/* offset 0 */ +#define DP_DOWNSTREAM_PORT_0 0x80 +# define DP_DS_PORT_TYPE_MASK (7 << 0) +# define DP_DS_PORT_TYPE_DP 0 +# define DP_DS_PORT_TYPE_VGA 1 +# define DP_DS_PORT_TYPE_DVI 2 +# define DP_DS_PORT_TYPE_HDMI 3 +# define DP_DS_PORT_TYPE_NON_EDID 4 +# define DP_DS_PORT_TYPE_DP_DUALMODE 5 +# define DP_DS_PORT_TYPE_WIRELESS 6 +# define DP_DS_PORT_HPD (1 << 3) +# define DP_DS_NON_EDID_MASK (0xf << 4) +# define DP_DS_NON_EDID_720x480i_60 (1 << 4) +# define DP_DS_NON_EDID_720x480i_50 (2 << 4) +# define DP_DS_NON_EDID_1920x1080i_60 (3 << 4) +# define DP_DS_NON_EDID_1920x1080i_50 (4 << 4) +# define DP_DS_NON_EDID_1280x720_60 (5 << 4) +# define DP_DS_NON_EDID_1280x720_50 (7 << 4) +/* offset 1 for VGA is maximum megapixels per second / 8 */ +/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */ +/* offset 2 for VGA/DVI/HDMI */ +# define DP_DS_MAX_BPC_MASK (3 << 0) +# define DP_DS_8BPC 0 +# define DP_DS_10BPC 1 +# define DP_DS_12BPC 2 +# define DP_DS_16BPC 3 +/* HDMI2.1 PCON FRL CONFIGURATION */ +# define DP_PCON_MAX_FRL_BW (7 << 2) +# define DP_PCON_MAX_0GBPS (0 << 2) +# define DP_PCON_MAX_9GBPS (1 << 2) +# define DP_PCON_MAX_18GBPS (2 << 2) +# define DP_PCON_MAX_24GBPS (3 << 2) +# define DP_PCON_MAX_32GBPS (4 << 2) +# define DP_PCON_MAX_40GBPS (5 << 2) +# define DP_PCON_MAX_48GBPS (6 << 2) +# define DP_PCON_SOURCE_CTL_MODE (1 << 5) + +/* offset 3 for DVI */ +# define DP_DS_DVI_DUAL_LINK (1 << 1) +# define DP_DS_DVI_HIGH_COLOR_DEPTH (1 << 2) +/* offset 3 for HDMI */ +# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0) +# define DP_DS_HDMI_YCBCR422_PASS_THROUGH (1 << 1) +# define DP_DS_HDMI_YCBCR420_PASS_THROUGH (1 << 2) +# define DP_DS_HDMI_YCBCR444_TO_422_CONV (1 << 3) +# define DP_DS_HDMI_YCBCR444_TO_420_CONV (1 << 4) + +/* + * VESA DP-to-HDMI PCON Specification adds caps for colorspace + * conversion in DFP cap DPCD 83h. Sec6.1 Table-3. + * Based on the available support the source can enable + * color conversion by writing into PROTOCOL_COVERTER_CONTROL_2 + * DPCD 3052h. 
+ */ +# define DP_DS_HDMI_BT601_RGB_YCBCR_CONV (1 << 5) +# define DP_DS_HDMI_BT709_RGB_YCBCR_CONV (1 << 6) +# define DP_DS_HDMI_BT2020_RGB_YCBCR_CONV (1 << 7) + +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +/* DP Forward error Correction Registers */ +#define DP_FEC_CAPABILITY 0x090 /* 1.4 */ +# define DP_FEC_CAPABLE (1 << 0) +# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1) +# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2) +# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3) +#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */ + +/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */ +#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xD /* 0x92 through 0x9E */ +#define DP_PCON_DSC_ENCODER 0x092 +# define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0) +# define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1) + +/* DP-HDMI2.1 PCON DSC Version */ +#define DP_PCON_DSC_VERSION 0x093 +# define DP_PCON_DSC_MAJOR_MASK (0xF << 0) +# define DP_PCON_DSC_MINOR_MASK (0xF << 4) +# define DP_PCON_DSC_MAJOR_SHIFT 0 +# define DP_PCON_DSC_MINOR_SHIFT 4 + +/* DP-HDMI2.1 PCON DSC RC Buffer block size */ +#define DP_PCON_DSC_RC_BUF_BLK_INFO 0x094 +# define DP_PCON_DSC_RC_BUF_BLK_SIZE (0x3 << 0) +# define DP_PCON_DSC_RC_BUF_BLK_1KB 0 +# define DP_PCON_DSC_RC_BUF_BLK_4KB 1 +# define DP_PCON_DSC_RC_BUF_BLK_16KB 2 +# define DP_PCON_DSC_RC_BUF_BLK_64KB 3 + +/* DP-HDMI2.1 PCON DSC RC Buffer size */ +#define DP_PCON_DSC_RC_BUF_SIZE 0x095 + +/* DP-HDMI2.1 PCON DSC Slice capabilities-1 */ +#define DP_PCON_DSC_SLICE_CAP_1 0x096 +# define DP_PCON_DSC_1_PER_DSC_ENC (0x1 << 0) +# define DP_PCON_DSC_2_PER_DSC_ENC (0x1 << 1) +# define DP_PCON_DSC_4_PER_DSC_ENC (0x1 << 3) +# define DP_PCON_DSC_6_PER_DSC_ENC (0x1 << 4) +# define DP_PCON_DSC_8_PER_DSC_ENC (0x1 << 5) +# define DP_PCON_DSC_10_PER_DSC_ENC (0x1 << 6) +# define DP_PCON_DSC_12_PER_DSC_ENC (0x1 << 7) + +#define DP_PCON_DSC_BUF_BIT_DEPTH 0x097 +# define DP_PCON_DSC_BIT_DEPTH_MASK (0xF << 0) +# define DP_PCON_DSC_DEPTH_9_BITS 0 +# define DP_PCON_DSC_DEPTH_10_BITS 1 +# define DP_PCON_DSC_DEPTH_11_BITS 2 +# define DP_PCON_DSC_DEPTH_12_BITS 3 +# define DP_PCON_DSC_DEPTH_13_BITS 4 +# define DP_PCON_DSC_DEPTH_14_BITS 5 +# define DP_PCON_DSC_DEPTH_15_BITS 6 +# define DP_PCON_DSC_DEPTH_16_BITS 7 +# define DP_PCON_DSC_DEPTH_8_BITS 8 + +#define DP_PCON_DSC_BLOCK_PREDICTION 0x098 +# define DP_PCON_DSC_BLOCK_PRED_SUPPORT (0x1 << 0) + +#define DP_PCON_DSC_ENC_COLOR_FMT_CAP 0x099 +# define DP_PCON_DSC_ENC_RGB (0x1 << 0) +# define DP_PCON_DSC_ENC_YUV444 (0x1 << 1) +# define DP_PCON_DSC_ENC_YUV422_S (0x1 << 2) +# define DP_PCON_DSC_ENC_YUV422_N (0x1 << 3) +# define DP_PCON_DSC_ENC_YUV420_N (0x1 << 4) + +#define DP_PCON_DSC_ENC_COLOR_DEPTH_CAP 0x09A +# define DP_PCON_DSC_ENC_8BPC (0x1 << 1) +# define DP_PCON_DSC_ENC_10BPC (0x1 << 2) +# define DP_PCON_DSC_ENC_12BPC (0x1 << 3) + +#define DP_PCON_DSC_MAX_SLICE_WIDTH 0x09B + +/* DP-HDMI2.1 PCON DSC Slice capabilities-2 */ +#define DP_PCON_DSC_SLICE_CAP_2 0x09C +# define DP_PCON_DSC_16_PER_DSC_ENC (0x1 << 0) +# define DP_PCON_DSC_20_PER_DSC_ENC (0x1 << 1) +# define DP_PCON_DSC_24_PER_DSC_ENC (0x1 << 2) + +/* DP-HDMI2.1 PCON HDMI TX Encoder Bits/pixel increment */ +#define DP_PCON_DSC_BPP_INCR 0x09E +# define DP_PCON_DSC_BPP_INCR_MASK (0x7 << 0) +# define DP_PCON_DSC_ONE_16TH_BPP 0 +# define DP_PCON_DSC_ONE_8TH_BPP 1 +# define DP_PCON_DSC_ONE_4TH_BPP 2 +# define DP_PCON_DSC_ONE_HALF_BPP 3 +# define DP_PCON_DSC_ONE_BPP 4 + +/* DP Extended DSC Capabilities */ +#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */ +#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1 +#define 
DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 + +/* DFP Capability Extension */ +#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */ + +/* Link Configuration */ +#define DP_LINK_BW_SET 0x100 +# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ +# define DP_LINK_BW_1_62 0x06 +# define DP_LINK_BW_2_7 0x0a +# define DP_LINK_BW_5_4 0x14 /* 1.2 */ +# define DP_LINK_BW_8_1 0x1e /* 1.4 */ +# define DP_LINK_BW_10 0x01 /* 2.0 128b/132b Link Layer */ +# define DP_LINK_BW_13_5 0x04 /* 2.0 128b/132b Link Layer */ +# define DP_LINK_BW_20 0x02 /* 2.0 128b/132b Link Layer */ + +#define DP_LANE_COUNT_SET 0x101 +# define DP_LANE_COUNT_MASK 0x0f +# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) + +#define DP_TRAINING_PATTERN_SET 0x102 +# define DP_TRAINING_PATTERN_DISABLE 0 +# define DP_TRAINING_PATTERN_1 1 +# define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */ +# define DP_TRAINING_PATTERN_3 3 /* 1.2 */ +# define DP_TRAINING_PATTERN_4 7 /* 1.4 */ +# define DP_TRAINING_PATTERN_MASK 0x3 +# define DP_TRAINING_PATTERN_MASK_1_4 0xf + +/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */ +# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2) + +# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) +# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) + +# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) +# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) +# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) +# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) + +#define DP_TRAINING_LANE0_SET 0x103 +#define DP_TRAINING_LANE1_SET 0x104 +#define DP_TRAINING_LANE2_SET 0x105 +#define DP_TRAINING_LANE3_SET 0x106 + +# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 +# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 +# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0) + +# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3) + +# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 +# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) + +# define DP_TX_FFE_PRESET_VALUE_MASK (0xf << 0) /* 2.0 128b/132b Link Layer */ + +#define DP_DOWNSPREAD_CTRL 0x107 +# define DP_SPREAD_AMP_0_5 (1 << 4) +# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ + +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +# define DP_SET_ANSI_8B10B (1 << 0) +# define DP_SET_ANSI_128B132B (1 << 1) + +#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ +/* bitmask as for DP_I2C_SPEED_CAP */ + +#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? 
*/ +# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0) +# define DP_FRAMING_CHANGE_ENABLE (1 << 1) +# define DP_PANEL_SELF_TEST_ENABLE (1 << 7) + +#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */ +#define DP_LINK_QUAL_LANE1_SET 0x10c +#define DP_LINK_QUAL_LANE2_SET 0x10d +#define DP_LINK_QUAL_LANE3_SET 0x10e +# define DP_LINK_QUAL_PATTERN_DISABLE 0 +# define DP_LINK_QUAL_PATTERN_D10_2 1 +# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2 +# define DP_LINK_QUAL_PATTERN_PRBS7 3 +# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4 +# define DP_LINK_QUAL_PATTERN_CP2520_PAT_1 5 +# define DP_LINK_QUAL_PATTERN_CP2520_PAT_2 6 +# define DP_LINK_QUAL_PATTERN_CP2520_PAT_3 7 +/* DP 2.0 UHBR10, UHBR13.5, UHBR20 */ +# define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08 +# define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10 +# define DP_LINK_QUAL_PATTERN_PRSBS9 0x18 +# define DP_LINK_QUAL_PATTERN_PRSBS11 0x20 +# define DP_LINK_QUAL_PATTERN_PRSBS15 0x28 +# define DP_LINK_QUAL_PATTERN_PRSBS23 0x30 +# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38 +# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40 +# define DP_LINK_QUAL_PATTERN_SQUARE 0x48 + +#define DP_TRAINING_LANE0_1_SET2 0x10f +#define DP_TRAINING_LANE2_3_SET2 0x110 +# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0) +# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2) +# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4) +# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6) + +#define DP_MSTM_CTRL 0x111 /* 1.2 */ +# define DP_MST_EN (1 << 0) +# define DP_UP_REQ_EN (1 << 1) +# define DP_UPSTREAM_IS_SRC (1 << 2) + +#define DP_AUDIO_DELAY0 0x112 /* 1.2 */ +#define DP_AUDIO_DELAY1 0x113 +#define DP_AUDIO_DELAY2 0x114 + +#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */ +# define DP_LINK_RATE_SET_SHIFT 0 +# define DP_LINK_RATE_SET_MASK (7 << 0) + +#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */ +# define DP_ALPM_ENABLE (1 << 0) +# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0) +# define DP_IRQ_HPD_ENABLE (1 << 1) + +#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */ +# define DP_PWR_NOT_NEEDED (1 << 0) + +#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */ +# define DP_FEC_READY (1 << 0) +# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1) +# define DP_FEC_ERR_COUNT_DIS (0 << 1) +# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1) +# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1) +# define DP_FEC_BIT_ERROR_COUNT (3 << 1) +# define DP_FEC_LANE_SELECT_MASK (3 << 4) +# define DP_FEC_LANE_0_SELECT (0 << 4) +# define DP_FEC_LANE_1_SELECT (1 << 4) +# define DP_FEC_LANE_2_SELECT (2 << 4) +# define DP_FEC_LANE_3_SELECT (3 << 4) + +#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_VALID (1 << 0) + +#define DP_DSC_ENABLE 0x160 /* DP 1.4 */ +# define DP_DECOMPRESSION_EN (1 << 0) +#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */ + +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? 
*/ +# define DP_PSR_ENABLE BIT(0) +# define DP_PSR_MAIN_LINK_ACTIVE BIT(1) +# define DP_PSR_CRC_VERIFICATION BIT(2) +# define DP_PSR_FRAME_CAPTURE BIT(3) +# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */ +# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */ +# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */ + +#define DP_ADAPTER_CTRL 0x1a0 +# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) + +#define DP_BRANCH_DEVICE_CTRL 0x1a1 +# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) + +#define DP_PAYLOAD_ALLOCATE_SET 0x1c0 +#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 +#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 + +/* Link/Sink Device Status */ +#define DP_SINK_COUNT 0x200 +/* prior to 1.2 bit 7 was reserved mbz */ +# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) +# define DP_SINK_CP_READY (1 << 6) + +#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 +# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) +# define DP_AUTOMATED_TEST_REQUEST (1 << 1) +# define DP_CP_IRQ (1 << 2) +# define DP_MCCS_IRQ (1 << 3) +# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */ +# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */ +# define DP_SINK_SPECIFIC_IRQ (1 << 6) + +#define DP_LANE0_1_STATUS 0x202 +#define DP_LANE2_3_STATUS 0x203 +# define DP_LANE_CR_DONE (1 << 0) +# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) +# define DP_LANE_SYMBOL_LOCKED (1 << 2) + +#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \ + DP_LANE_CHANNEL_EQ_DONE | \ + DP_LANE_SYMBOL_LOCKED) + +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */ +#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */ +#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */ +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) + +#define DP_SINK_STATUS 0x205 +# define DP_RECEIVE_PORT_0_STATUS (1 << 0) +# define DP_RECEIVE_PORT_1_STATUS (1 << 1) +# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */ +# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */ + +#define DP_ADJUST_REQUEST_LANE0_1 0x206 +#define DP_ADJUST_REQUEST_LANE2_3 0x207 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 + +/* DP 2.0 128b/132b Link Layer */ +# define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK (0xf << 0) +# define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0 +# define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK (0xf << 4) +# define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4 + +#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c +# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03 +# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0 +# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c +# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2 +# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30 +# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4 +# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0 +# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6 + +#define DP_TEST_REQUEST 0x218 +# define DP_TEST_LINK_TRAINING (1 << 0) +# define DP_TEST_LINK_VIDEO_PATTERN (1 << 1) +# define DP_TEST_LINK_EDID_READ (1 << 2) +# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ +# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 
1.2 */ +# define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */ +# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */ + +#define DP_TEST_LINK_RATE 0x219 +# define DP_LINK_RATE_162 (0x6) +# define DP_LINK_RATE_27 (0xa) + +#define DP_TEST_LANE_COUNT 0x220 + +#define DP_TEST_PATTERN 0x221 +# define DP_NO_TEST_PATTERN 0x0 +# define DP_COLOR_RAMP 0x1 +# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2 +# define DP_COLOR_SQUARE 0x3 + +#define DP_TEST_H_TOTAL_HI 0x222 +#define DP_TEST_H_TOTAL_LO 0x223 + +#define DP_TEST_V_TOTAL_HI 0x224 +#define DP_TEST_V_TOTAL_LO 0x225 + +#define DP_TEST_H_START_HI 0x226 +#define DP_TEST_H_START_LO 0x227 + +#define DP_TEST_V_START_HI 0x228 +#define DP_TEST_V_START_LO 0x229 + +#define DP_TEST_HSYNC_HI 0x22A +# define DP_TEST_HSYNC_POLARITY (1 << 7) +# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0) +#define DP_TEST_HSYNC_WIDTH_LO 0x22B + +#define DP_TEST_VSYNC_HI 0x22C +# define DP_TEST_VSYNC_POLARITY (1 << 7) +# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0) +#define DP_TEST_VSYNC_WIDTH_LO 0x22D + +#define DP_TEST_H_WIDTH_HI 0x22E +#define DP_TEST_H_WIDTH_LO 0x22F + +#define DP_TEST_V_HEIGHT_HI 0x230 +#define DP_TEST_V_HEIGHT_LO 0x231 + +#define DP_TEST_MISC0 0x232 +# define DP_TEST_SYNC_CLOCK (1 << 0) +# define DP_TEST_COLOR_FORMAT_MASK (3 << 1) +# define DP_TEST_COLOR_FORMAT_SHIFT 1 +# define DP_COLOR_FORMAT_RGB (0 << 1) +# define DP_COLOR_FORMAT_YCbCr422 (1 << 1) +# define DP_COLOR_FORMAT_YCbCr444 (2 << 1) +# define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3) +# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3) +# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4) +# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4) +# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4) +# define DP_TEST_BIT_DEPTH_MASK (7 << 5) +# define DP_TEST_BIT_DEPTH_SHIFT 5 +# define DP_TEST_BIT_DEPTH_6 (0 << 5) +# define DP_TEST_BIT_DEPTH_8 (1 << 5) +# define DP_TEST_BIT_DEPTH_10 (2 << 5) +# define DP_TEST_BIT_DEPTH_12 (3 << 5) +# define DP_TEST_BIT_DEPTH_16 (4 << 5) + +#define DP_TEST_MISC1 0x233 +# define DP_TEST_REFRESH_DENOMINATOR (1 << 0) +# define DP_TEST_INTERLACED (1 << 1) + +#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234 + +#define DP_TEST_MISC0 0x232 + +#define DP_TEST_CRC_R_CR 0x240 +#define DP_TEST_CRC_G_Y 0x242 +#define DP_TEST_CRC_B_CB 0x244 + +#define DP_TEST_SINK_MISC 0x246 +# define DP_TEST_CRC_SUPPORTED (1 << 5) +# define DP_TEST_COUNT_MASK 0xf + +#define DP_PHY_TEST_PATTERN 0x248 +# define DP_PHY_TEST_PATTERN_SEL_MASK 0x7 +# define DP_PHY_TEST_PATTERN_NONE 0x0 +# define DP_PHY_TEST_PATTERN_D10_2 0x1 +# define DP_PHY_TEST_PATTERN_ERROR_COUNT 0x2 +# define DP_PHY_TEST_PATTERN_PRBS7 0x3 +# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4 +# define DP_PHY_TEST_PATTERN_CP2520 0x5 + +#define DP_PHY_SQUARE_PATTERN 0x249 + +#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A +#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250 +#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251 +#define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252 +#define DP_TEST_80BIT_CUSTOM_PATTERN_31_24 0x253 +#define DP_TEST_80BIT_CUSTOM_PATTERN_39_32 0x254 +#define DP_TEST_80BIT_CUSTOM_PATTERN_47_40 0x255 +#define DP_TEST_80BIT_CUSTOM_PATTERN_55_48 0x256 +#define DP_TEST_80BIT_CUSTOM_PATTERN_63_56 0x257 +#define DP_TEST_80BIT_CUSTOM_PATTERN_71_64 0x258 +#define DP_TEST_80BIT_CUSTOM_PATTERN_79_72 0x259 + +#define DP_TEST_RESPONSE 0x260 +# define DP_TEST_ACK (1 << 0) +# define DP_TEST_NAK (1 << 1) +# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) + +#define DP_TEST_EDID_CHECKSUM 0x261 + +#define DP_TEST_SINK 0x270 +# define DP_TEST_SINK_START (1 << 0) 
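
The sink-CRC test registers above (DP_TEST_SINK, DP_TEST_SINK_MISC and the DP_TEST_CRC_* pairs) are normally driven through the DPCD helpers declared later in this patch in drm_dp_helper.h (drm_dp_dpcd_readb(), drm_dp_dpcd_writeb(), drm_dp_dpcd_read()). The following minimal sketch shows how a driver might capture one frame CRC; it is illustrative only and not part of the header, the function name dp_read_sink_crc() is made up for this example, error handling is reduced to the essentials, and a production driver would typically also wait for the frame counter in DP_TEST_SINK_MISC (DP_TEST_COUNT_MASK) to advance before trusting the values.

/*
 * Illustrative sketch, not part of drm_dp.h. Assumes
 * <drm/display/drm_dp_helper.h> (added later in this patch) is included and
 * that @aux is an already-initialised AUX channel.
 */
static int dp_read_sink_crc(struct drm_dp_aux *aux, u16 crc[3])
{
	u8 buf[6], misc;
	int ret;

	/* CRC test capability is advertised in TEST_SINK_MISC, bit 5. */
	ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &misc);
	if (ret < 0)
		return ret;
	if (!(misc & DP_TEST_CRC_SUPPORTED))
		return -EOPNOTSUPP;

	/* Ask the sink to start calculating per-frame CRCs. */
	ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, DP_TEST_SINK_START);
	if (ret < 0)
		return ret;

	/* R/Cr, G/Y and B/Cb CRCs are consecutive 16-bit values, LSB first. */
	ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	crc[0] = buf[0] | (buf[1] << 8);
	crc[1] = buf[2] | (buf[3] << 8);
	crc[2] = buf[4] | (buf[5] << 8);

	return 0;
}
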
+#define DP_TEST_AUDIO_MODE 0x271 +#define DP_TEST_AUDIO_PATTERN_TYPE 0x272 +#define DP_TEST_AUDIO_PERIOD_CH1 0x273 +#define DP_TEST_AUDIO_PERIOD_CH2 0x274 +#define DP_TEST_AUDIO_PERIOD_CH3 0x275 +#define DP_TEST_AUDIO_PERIOD_CH4 0x276 +#define DP_TEST_AUDIO_PERIOD_CH5 0x277 +#define DP_TEST_AUDIO_PERIOD_CH6 0x278 +#define DP_TEST_AUDIO_PERIOD_CH7 0x279 +#define DP_TEST_AUDIO_PERIOD_CH8 0x27A + +#define DP_FEC_STATUS 0x280 /* 1.4 */ +# define DP_FEC_DECODE_EN_DETECTED (1 << 0) +# define DP_FEC_DECODE_DIS_DETECTED (1 << 1) + +#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */ + +#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */ +# define DP_FEC_ERROR_COUNT_MASK 0x7F +# define DP_FEC_ERR_COUNT_VALID (1 << 7) + +#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ +# define DP_PAYLOAD_TABLE_UPDATED (1 << 0) +# define DP_PAYLOAD_ACT_HANDLED (1 << 1) + +#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */ +/* up to ID_SLOT_63 at 0x2ff */ + +/* Source Device-specific */ +#define DP_SOURCE_OUI 0x300 + +/* Sink Device-specific */ +#define DP_SINK_OUI 0x400 + +/* Branch Device-specific */ +#define DP_BRANCH_OUI 0x500 +#define DP_BRANCH_ID 0x503 +#define DP_BRANCH_REVISION_START 0x509 +#define DP_BRANCH_HW_REV 0x509 +#define DP_BRANCH_SW_REV 0x50A + +/* Link/Sink Device Power Control */ +#define DP_SET_POWER 0x600 +# define DP_SET_POWER_D0 0x1 +# define DP_SET_POWER_D3 0x2 +# define DP_SET_POWER_MASK 0x3 +# define DP_SET_POWER_D3_AUX_ON 0x5 + +/* eDP-specific */ +#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ +# define DP_EDP_11 0x00 +# define DP_EDP_12 0x01 +# define DP_EDP_13 0x02 +# define DP_EDP_14 0x03 +# define DP_EDP_14a 0x04 /* eDP 1.4a */ +# define DP_EDP_14b 0x05 /* eDP 1.4b */ + +#define DP_EDP_GENERAL_CAP_1 0x701 +# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2) +# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3) +# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4) +# define DP_EDP_FRC_ENABLE_CAP (1 << 5) +# define DP_EDP_COLOR_ENGINE_CAP (1 << 6) +# define DP_EDP_SET_POWER_CAP (1 << 7) + +#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 +# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2) +# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5) +# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7) + +#define DP_EDP_GENERAL_CAP_2 0x703 +# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) + +#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ +# define DP_EDP_X_REGION_CAP_MASK (0xf << 0) +# define DP_EDP_X_REGION_CAP_SHIFT 0 +# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4) +# define DP_EDP_Y_REGION_CAP_SHIFT 4 + +#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 +# define DP_EDP_BACKLIGHT_ENABLE (1 << 0) +# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1) +# define DP_EDP_FRC_ENABLE (1 << 2) +# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7) + +#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 +# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0) +# 
define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3) +# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4) +# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5) +# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */ + +#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 +#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 + +#define DP_EDP_PWMGEN_BIT_COUNT 0x724 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726 +# define DP_EDP_PWMGEN_BIT_COUNT_MASK (0x1f << 0) + +#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727 + +#define DP_EDP_BACKLIGHT_FREQ_SET 0x728 +# define DP_EDP_BACKLIGHT_FREQ_BASE_KHZ 27000 + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f + +#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732 +#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733 + +#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ +#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ + +#define DP_EDP_MSO_LINK_CAPABILITIES 0x7a4 /* eDP 1.4 */ +# define DP_EDP_MSO_NUMBER_OF_LINKS_MASK (7 << 0) +# define DP_EDP_MSO_NUMBER_OF_LINKS_SHIFT 0 +# define DP_EDP_MSO_INDEPENDENT_LINK_BIT (1 << 3) + +/* Sideband MSG Buffers */ +#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */ + +/* DPRX Event Status Indicator */ +#define DP_SINK_COUNT_ESI 0x2002 /* same as 0x200 */ +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* same as 0x201 */ + +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */ +# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0) +# define DP_LOCK_ACQUISITION_REQUEST (1 << 1) +# define DP_CEC_IRQ (1 << 2) + +#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */ +# define RX_CAP_CHANGED (1 << 0) +# define LINK_STATUS_CHANGED (1 << 1) +# define STREAM_STATUS_CHANGED (1 << 2) +# define HDMI_LINK_STATUS_CHANGED (1 << 3) +# define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4) + +#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ +# define DP_PSR_LINK_CRC_ERROR (1 << 0) +# define DP_PSR_RFB_STORAGE_ERROR (1 << 1) +# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */ + +#define DP_PSR_ESI 0x2007 /* XXX 1.2? */ +# define DP_PSR_CAPS_CHANGE (1 << 0) + +#define DP_PSR_STATUS 0x2008 /* XXX 1.2? 
*/ +# define DP_PSR_SINK_INACTIVE 0 +# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 +# define DP_PSR_SINK_ACTIVE_RFB 2 +# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3 +# define DP_PSR_SINK_ACTIVE_RESYNC 4 +# define DP_PSR_SINK_INTERNAL_ERROR 7 +# define DP_PSR_SINK_STATE_MASK 0x07 + +#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* edp 1.4 */ +# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0) +# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0 +# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4) +# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4 + +#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */ +# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */ +# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */ +# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */ +# define DP_SU_VALID (1 << 3) /* eDP 1.4 */ +# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */ +# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */ +# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */ + +#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ +# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) + +#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */ +#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */ +#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ +#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ + +/* Extended Receiver Capability: See DP_DPCD_REV for definitions */ +#define DP_DP13_DPCD_REV 0x2200 + +#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */ +# define DP_GTC_CAP (1 << 0) /* DP 1.3 */ +# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */ +# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */ +# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */ +# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */ +# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */ + +#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */ +# define DP_UHBR10 (1 << 0) +# define DP_UHBR20 (1 << 1) +# define DP_UHBR13_5 (1 << 2) + +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */ +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7) +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS 0x02 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS 0x03 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS 0x04 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS 0x05 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS 0x06 + +#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230 +#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250 + +/* DSC Extended Capability Branch Total DSC Resources */ +#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */ +# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5) +# define DP_DSC_DECODER_COUNT_SHIFT 5 +#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */ +# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0) +# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1) +# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1 + +/* Protocol Converter Extension */ +/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */ +#define DP_CEC_TUNNELING_CAPABILITY 0x3000 +# define DP_CEC_TUNNELING_CAPABLE (1 << 0) +# define DP_CEC_SNOOPING_CAPABLE (1 << 1) 
+# define DP_CEC_MULTIPLE_LA_CAPABLE (1 << 2) + +#define DP_CEC_TUNNELING_CONTROL 0x3001 +# define DP_CEC_TUNNELING_ENABLE (1 << 0) +# define DP_CEC_SNOOPING_ENABLE (1 << 1) + +#define DP_CEC_RX_MESSAGE_INFO 0x3002 +# define DP_CEC_RX_MESSAGE_LEN_MASK (0xf << 0) +# define DP_CEC_RX_MESSAGE_LEN_SHIFT 0 +# define DP_CEC_RX_MESSAGE_HPD_STATE (1 << 4) +# define DP_CEC_RX_MESSAGE_HPD_LOST (1 << 5) +# define DP_CEC_RX_MESSAGE_ACKED (1 << 6) +# define DP_CEC_RX_MESSAGE_ENDED (1 << 7) + +#define DP_CEC_TX_MESSAGE_INFO 0x3003 +# define DP_CEC_TX_MESSAGE_LEN_MASK (0xf << 0) +# define DP_CEC_TX_MESSAGE_LEN_SHIFT 0 +# define DP_CEC_TX_RETRY_COUNT_MASK (0x7 << 4) +# define DP_CEC_TX_RETRY_COUNT_SHIFT 4 +# define DP_CEC_TX_MESSAGE_SEND (1 << 7) + +#define DP_CEC_TUNNELING_IRQ_FLAGS 0x3004 +# define DP_CEC_RX_MESSAGE_INFO_VALID (1 << 0) +# define DP_CEC_RX_MESSAGE_OVERFLOW (1 << 1) +# define DP_CEC_TX_MESSAGE_SENT (1 << 4) +# define DP_CEC_TX_LINE_ERROR (1 << 5) +# define DP_CEC_TX_ADDRESS_NACK_ERROR (1 << 6) +# define DP_CEC_TX_DATA_NACK_ERROR (1 << 7) + +#define DP_CEC_LOGICAL_ADDRESS_MASK 0x300E /* 0x300F word */ +# define DP_CEC_LOGICAL_ADDRESS_0 (1 << 0) +# define DP_CEC_LOGICAL_ADDRESS_1 (1 << 1) +# define DP_CEC_LOGICAL_ADDRESS_2 (1 << 2) +# define DP_CEC_LOGICAL_ADDRESS_3 (1 << 3) +# define DP_CEC_LOGICAL_ADDRESS_4 (1 << 4) +# define DP_CEC_LOGICAL_ADDRESS_5 (1 << 5) +# define DP_CEC_LOGICAL_ADDRESS_6 (1 << 6) +# define DP_CEC_LOGICAL_ADDRESS_7 (1 << 7) +#define DP_CEC_LOGICAL_ADDRESS_MASK_2 0x300F /* 0x300E word */ +# define DP_CEC_LOGICAL_ADDRESS_8 (1 << 0) +# define DP_CEC_LOGICAL_ADDRESS_9 (1 << 1) +# define DP_CEC_LOGICAL_ADDRESS_10 (1 << 2) +# define DP_CEC_LOGICAL_ADDRESS_11 (1 << 3) +# define DP_CEC_LOGICAL_ADDRESS_12 (1 << 4) +# define DP_CEC_LOGICAL_ADDRESS_13 (1 << 5) +# define DP_CEC_LOGICAL_ADDRESS_14 (1 << 6) +# define DP_CEC_LOGICAL_ADDRESS_15 (1 << 7) + +#define DP_CEC_RX_MESSAGE_BUFFER 0x3010 +#define DP_CEC_TX_MESSAGE_BUFFER 0x3020 +#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10 + +/* PCON CONFIGURE-1 FRL FOR HDMI SINK */ +#define DP_PCON_HDMI_LINK_CONFIG_1 0x305A +# define DP_PCON_ENABLE_MAX_FRL_BW (7 << 0) +# define DP_PCON_ENABLE_MAX_BW_0GBPS 0 +# define DP_PCON_ENABLE_MAX_BW_9GBPS 1 +# define DP_PCON_ENABLE_MAX_BW_18GBPS 2 +# define DP_PCON_ENABLE_MAX_BW_24GBPS 3 +# define DP_PCON_ENABLE_MAX_BW_32GBPS 4 +# define DP_PCON_ENABLE_MAX_BW_40GBPS 5 +# define DP_PCON_ENABLE_MAX_BW_48GBPS 6 +# define DP_PCON_ENABLE_SOURCE_CTL_MODE (1 << 3) +# define DP_PCON_ENABLE_CONCURRENT_LINK (1 << 4) +# define DP_PCON_ENABLE_SEQUENTIAL_LINK (0 << 4) +# define DP_PCON_ENABLE_LINK_FRL_MODE (1 << 5) +# define DP_PCON_ENABLE_HPD_READY (1 << 6) +# define DP_PCON_ENABLE_HDMI_LINK (1 << 7) + +/* PCON CONFIGURE-2 FRL FOR HDMI SINK */ +#define DP_PCON_HDMI_LINK_CONFIG_2 0x305B +# define DP_PCON_MAX_LINK_BW_MASK (0x3F << 0) +# define DP_PCON_FRL_BW_MASK_9GBPS (1 << 0) +# define DP_PCON_FRL_BW_MASK_18GBPS (1 << 1) +# define DP_PCON_FRL_BW_MASK_24GBPS (1 << 2) +# define DP_PCON_FRL_BW_MASK_32GBPS (1 << 3) +# define DP_PCON_FRL_BW_MASK_40GBPS (1 << 4) +# define DP_PCON_FRL_BW_MASK_48GBPS (1 << 5) +# define DP_PCON_FRL_LINK_TRAIN_EXTENDED (1 << 6) +# define DP_PCON_FRL_LINK_TRAIN_NORMAL (0 << 6) + +/* PCON HDMI LINK STATUS */ +#define DP_PCON_HDMI_TX_LINK_STATUS 0x303B +# define DP_PCON_HDMI_TX_LINK_ACTIVE (1 << 0) +# define DP_PCON_FRL_READY (1 << 1) + +/* PCON HDMI POST FRL STATUS */ +#define DP_PCON_HDMI_POST_FRL_STATUS 0x3036 +# define DP_PCON_HDMI_LINK_MODE (1 << 0) +# define DP_PCON_HDMI_MODE_TMDS 0 +# 
define DP_PCON_HDMI_MODE_FRL 1 +# define DP_PCON_HDMI_FRL_TRAINED_BW (0x3F << 1) +# define DP_PCON_FRL_TRAINED_BW_9GBPS (1 << 1) +# define DP_PCON_FRL_TRAINED_BW_18GBPS (1 << 2) +# define DP_PCON_FRL_TRAINED_BW_24GBPS (1 << 3) +# define DP_PCON_FRL_TRAINED_BW_32GBPS (1 << 4) +# define DP_PCON_FRL_TRAINED_BW_40GBPS (1 << 5) +# define DP_PCON_FRL_TRAINED_BW_48GBPS (1 << 6) + +#define DP_PROTOCOL_CONVERTER_CONTROL_0 0x3050 /* DP 1.3 */ +# define DP_HDMI_DVI_OUTPUT_CONFIG (1 << 0) /* DP 1.3 */ +#define DP_PROTOCOL_CONVERTER_CONTROL_1 0x3051 /* DP 1.3 */ +# define DP_CONVERSION_TO_YCBCR420_ENABLE (1 << 0) /* DP 1.3 */ +# define DP_HDMI_EDID_PROCESSING_DISABLE (1 << 1) /* DP 1.4 */ +# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE (1 << 2) /* DP 1.4 */ +# define DP_HDMI_FORCE_SCRAMBLING (1 << 3) /* DP 1.4 */ +#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */ +# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */ +# define DP_PCON_ENABLE_DSC_ENCODER (1 << 1) +# define DP_PCON_ENCODER_PPS_OVERRIDE_MASK (0x3 << 2) +# define DP_PCON_ENC_PPS_OVERRIDE_DISABLED 0 +# define DP_PCON_ENC_PPS_OVERRIDE_EN_PARAMS 1 +# define DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER 2 +# define DP_CONVERSION_RGB_YCBCR_MASK (7 << 4) +# define DP_CONVERSION_BT601_RGB_YCBCR_ENABLE (1 << 4) +# define DP_CONVERSION_BT709_RGB_YCBCR_ENABLE (1 << 5) +# define DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE (1 << 6) + +/* PCON Downstream HDMI ERROR Status per Lane */ +#define DP_PCON_HDMI_ERROR_STATUS_LN0 0x3037 +#define DP_PCON_HDMI_ERROR_STATUS_LN1 0x3038 +#define DP_PCON_HDMI_ERROR_STATUS_LN2 0x3039 +#define DP_PCON_HDMI_ERROR_STATUS_LN3 0x303A +# define DP_PCON_HDMI_ERROR_COUNT_MASK (0x7 << 0) +# define DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS (1 << 0) +# define DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS (1 << 1) +# define DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS (1 << 2) + +/* PCON HDMI CONFIG PPS Override Buffer + * Valid Offsets to be added to Base : 0-127 + */ +#define DP_PCON_HDMI_PPS_OVERRIDE_BASE 0x3100 + +/* PCON HDMI CONFIG PPS Override Parameter: Slice height + * Offset-0 8LSBs of the Slice height. + * Offset-1 8MSBs of the Slice height. + */ +#define DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT 0x3180 + +/* PCON HDMI CONFIG PPS Override Parameter: Slice width + * Offset-0 8LSBs of the Slice width. + * Offset-1 8MSBs of the Slice width. + */ +#define DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH 0x3182 + +/* PCON HDMI CONFIG PPS Override Parameter: bits_per_pixel + * Offset-0 8LSBs of the bits_per_pixel. + * Offset-1 2MSBs of the bits_per_pixel. 
+ */ +#define DP_PCON_HDMI_PPS_OVRD_BPP 0x3184 + +/* HDCP 1.3 and HDCP 2.2 */ +#define DP_AUX_HDCP_BKSV 0x68000 +#define DP_AUX_HDCP_RI_PRIME 0x68005 +#define DP_AUX_HDCP_AKSV 0x68007 +#define DP_AUX_HDCP_AN 0x6800C +#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + h * 4) +#define DP_AUX_HDCP_BCAPS 0x68028 +# define DP_BCAPS_REPEATER_PRESENT BIT(1) +# define DP_BCAPS_HDCP_CAPABLE BIT(0) +#define DP_AUX_HDCP_BSTATUS 0x68029 +# define DP_BSTATUS_REAUTH_REQ BIT(3) +# define DP_BSTATUS_LINK_FAILURE BIT(2) +# define DP_BSTATUS_R0_PRIME_READY BIT(1) +# define DP_BSTATUS_READY BIT(0) +#define DP_AUX_HDCP_BINFO 0x6802A +#define DP_AUX_HDCP_KSV_FIFO 0x6802C +#define DP_AUX_HDCP_AINFO 0x6803B + +/* DP HDCP2.2 parameter offsets in DPCD address space */ +#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000 +#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008 +#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B +#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215 +#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D +#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220 +#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0 +#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0 +#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0 +#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0 +#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0 +#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8 +#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318 +#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328 +#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330 +#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332 +#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335 +#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345 +#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0 +#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0 +#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3 +#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5 +#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473 +#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493 +#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494 +#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518 + +/* LTTPR: Link Training (LT)-tunable PHY Repeaters */ +#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */ +#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */ +#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */ +#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */ +#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */ +#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */ +#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */ +#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */ +# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0) +/* See DP_128B132B_SUPPORTED_LINK_RATES for values */ +#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */ +#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */ + +enum drm_dp_phy { + DP_PHY_DPRX, + + DP_PHY_LTTPR1, + DP_PHY_LTTPR2, + DP_PHY_LTTPR3, + DP_PHY_LTTPR4, + DP_PHY_LTTPR5, + DP_PHY_LTTPR6, + DP_PHY_LTTPR7, + DP_PHY_LTTPR8, + + DP_MAX_LTTPR_COUNT = DP_PHY_LTTPR8, +}; + +#define DP_PHY_LTTPR(i) (DP_PHY_LTTPR1 + (i)) + +#define __DP_LTTPR1_BASE 0xf0010 /* 1.3 */ +#define __DP_LTTPR2_BASE 0xf0060 /* 1.3 */ +#define DP_LTTPR_BASE(dp_phy) \ + (__DP_LTTPR1_BASE + (__DP_LTTPR2_BASE - __DP_LTTPR1_BASE) * \ + ((dp_phy) - DP_PHY_LTTPR1)) + +#define DP_LTTPR_REG(dp_phy, lttpr1_reg) \ + (DP_LTTPR_BASE(dp_phy) - DP_LTTPR_BASE(DP_PHY_LTTPR1) + (lttpr1_reg)) + +#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */ +#define DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_TRAINING_PATTERN_SET_PHY_REPEATER1) + +#define 
DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */ +#define DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_TRAINING_LANE0_SET_PHY_REPEATER1) + +#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */ +#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */ +#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */ +#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */ +#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) + +#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */ +# define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0) +# define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1) + +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0022 /* 2.0 */ +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) +/* see DP_128B132B_TRAINING_AUX_RD_INTERVAL for values */ + +#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */ +#define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1) + +#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */ + +#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */ +#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */ +#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */ + +#define __DP_FEC1_BASE 0xf0290 /* 1.4 */ +#define __DP_FEC2_BASE 0xf0298 /* 1.4 */ +#define DP_FEC_BASE(dp_phy) \ + (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \ + ((dp_phy) - DP_PHY_LTTPR1))) + +#define DP_FEC_REG(dp_phy, fec1_reg) \ + (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg) + +#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */ +#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \ + DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1) + +#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */ +#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */ + +#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */ + +#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */ + +/* Repeater modes */ +#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */ +#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */ + +/* DP HDCP message start offsets in DPCD address space */ +#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET +#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET +#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET +#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET +#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET +#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \ + DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET +#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET +#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET +#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET +#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET +#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET +#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET +#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET 
DP_HDCP_2_2_REG_MPRIME_OFFSET + +#define HDCP_2_2_DP_RXSTATUS_LEN 1 +#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0)) +#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1)) +#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2)) +#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) +#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4)) + +/* DP 1.2 Sideband message defines */ +/* peer device type - DP 1.2a Table 2-92 */ +#define DP_PEER_DEVICE_NONE 0x0 +#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1 +#define DP_PEER_DEVICE_MST_BRANCHING 0x2 +#define DP_PEER_DEVICE_SST_SINK 0x3 +#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4 + +/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */ +#define DP_GET_MSG_TRANSACTION_VERSION 0x00 /* DP 1.3 */ +#define DP_LINK_ADDRESS 0x01 +#define DP_CONNECTION_STATUS_NOTIFY 0x02 +#define DP_ENUM_PATH_RESOURCES 0x10 +#define DP_ALLOCATE_PAYLOAD 0x11 +#define DP_QUERY_PAYLOAD 0x12 +#define DP_RESOURCE_STATUS_NOTIFY 0x13 +#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14 +#define DP_REMOTE_DPCD_READ 0x20 +#define DP_REMOTE_DPCD_WRITE 0x21 +#define DP_REMOTE_I2C_READ 0x22 +#define DP_REMOTE_I2C_WRITE 0x23 +#define DP_POWER_UP_PHY 0x24 +#define DP_POWER_DOWN_PHY 0x25 +#define DP_SINK_EVENT_NOTIFY 0x30 +#define DP_QUERY_STREAM_ENC_STATUS 0x38 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST 0 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE 1 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE 2 + +/* DP 1.2 MST sideband reply types */ +#define DP_SIDEBAND_REPLY_ACK 0x00 +#define DP_SIDEBAND_REPLY_NAK 0x01 + +/* DP 1.2 MST sideband nak reasons - table 2.84 */ +#define DP_NAK_WRITE_FAILURE 0x01 +#define DP_NAK_INVALID_READ 0x02 +#define DP_NAK_CRC_FAILURE 0x03 +#define DP_NAK_BAD_PARAM 0x04 +#define DP_NAK_DEFER 0x05 +#define DP_NAK_LINK_FAILURE 0x06 +#define DP_NAK_NO_RESOURCES 0x07 +#define DP_NAK_DPCD_FAIL 0x08 +#define DP_NAK_I2C_NAK 0x09 +#define DP_NAK_ALLOCATE_FAIL 0x0a + +#define MODE_I2C_START 1 +#define MODE_I2C_WRITE 2 +#define MODE_I2C_READ 4 +#define MODE_I2C_STOP 8 + +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + +#define DP_LINK_CONSTANT_N_VALUE 0x8000 +#define DP_LINK_STATUS_SIZE 6 + +#define DP_BRANCH_OUI_HEADER_SIZE 0xc +#define DP_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */ +#define EDP_PSR_RECEIVER_CAP_SIZE 2 +#define EDP_DISPLAY_CTL_CAP_SIZE 3 +#define DP_LTTPR_COMMON_CAP_SIZE 8 +#define DP_LTTPR_PHY_CAP_SIZE 3 + +#define DP_SDP_AUDIO_TIMESTAMP 0x01 +#define DP_SDP_AUDIO_STREAM 0x02 +#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */ +#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */ +#define DP_SDP_ISRC 0x06 /* DP 1.2 */ +#define DP_SDP_VSC 0x07 /* DP 1.2 */ +#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */ +#define DP_SDP_PPS 0x10 /* DP 1.4 */ +#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */ +#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */ +/* 0x80+ CEA-861 infoframe types */ + +#define DP_SDP_AUDIO_INFOFRAME_HB2 0x1b + +/** + * struct dp_sdp_header - DP secondary data packet header + * @HB0: Secondary Data Packet ID + * @HB1: Secondary Data Packet Type + * @HB2: Secondary Data Packet Specific header, Byte 0 + * @HB3: Secondary Data packet Specific header, Byte 1 + */ +struct dp_sdp_header { + u8 HB0; + u8 HB1; + u8 HB2; + u8 HB3; +} __packed; + +#define EDP_SDP_HEADER_REVISION_MASK 0x1F +#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F +#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F + +/** + * struct 
dp_sdp - DP secondary data packet + * @sdp_header: DP secondary data packet header + * @db: DP secondaray data packet data blocks + * VSC SDP Payload for PSR + * db[0]: Stereo Interface + * db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid + * db[2]: CRC value bits 7:0 of the R or Cr component + * db[3]: CRC value bits 15:8 of the R or Cr component + * db[4]: CRC value bits 7:0 of the G or Y component + * db[5]: CRC value bits 15:8 of the G or Y component + * db[6]: CRC value bits 7:0 of the B or Cb component + * db[7]: CRC value bits 15:8 of the B or Cb component + * db[8] - db[31]: Reserved + * VSC SDP Payload for Pixel Encoding/Colorimetry Format + * db[0] - db[15]: Reserved + * db[16]: Pixel Encoding and Colorimetry Formats + * db[17]: Dynamic Range and Component Bit Depth + * db[18]: Content Type + * db[19] - db[31]: Reserved + */ +struct dp_sdp { + struct dp_sdp_header sdp_header; + u8 db[32]; +} __packed; + +#define EDP_VSC_PSR_STATE_ACTIVE (1<<0) +#define EDP_VSC_PSR_UPDATE_RFB (1<<1) +#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) + +/** + * enum dp_pixelformat - drm DP Pixel encoding formats + * + * This enum is used to indicate DP VSC SDP Pixel encoding formats. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * + * @DP_PIXELFORMAT_RGB: RGB pixel encoding format + * @DP_PIXELFORMAT_YUV444: YCbCr 4:4:4 pixel encoding format + * @DP_PIXELFORMAT_YUV422: YCbCr 4:2:2 pixel encoding format + * @DP_PIXELFORMAT_YUV420: YCbCr 4:2:0 pixel encoding format + * @DP_PIXELFORMAT_Y_ONLY: Y Only pixel encoding format + * @DP_PIXELFORMAT_RAW: RAW pixel encoding format + * @DP_PIXELFORMAT_RESERVED: Reserved pixel encoding format + */ +enum dp_pixelformat { + DP_PIXELFORMAT_RGB = 0, + DP_PIXELFORMAT_YUV444 = 0x1, + DP_PIXELFORMAT_YUV422 = 0x2, + DP_PIXELFORMAT_YUV420 = 0x3, + DP_PIXELFORMAT_Y_ONLY = 0x4, + DP_PIXELFORMAT_RAW = 0x5, + DP_PIXELFORMAT_RESERVED = 0x6, +}; + +/** + * enum dp_colorimetry - drm DP Colorimetry formats + * + * This enum is used to indicate DP VSC SDP Colorimetry formats. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition. 
+ * + * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or + * ITU-R BT.601 colorimetry format + * @DP_COLORIMETRY_RGB_WIDE_FIXED: RGB wide gamut fixed point colorimetry format + * @DP_COLORIMETRY_BT709_YCC: ITU-R BT.709 colorimetry format + * @DP_COLORIMETRY_RGB_WIDE_FLOAT: RGB wide gamut floating point + * (scRGB (IEC 61966-2-2)) colorimetry format + * @DP_COLORIMETRY_XVYCC_601: xvYCC601 colorimetry format + * @DP_COLORIMETRY_OPRGB: OpRGB colorimetry format + * @DP_COLORIMETRY_XVYCC_709: xvYCC709 colorimetry format + * @DP_COLORIMETRY_DCI_P3_RGB: DCI-P3 (SMPTE RP 431-2) colorimetry format + * @DP_COLORIMETRY_SYCC_601: sYCC601 colorimetry format + * @DP_COLORIMETRY_RGB_CUSTOM: RGB Custom Color Profile colorimetry format + * @DP_COLORIMETRY_OPYCC_601: opYCC601 colorimetry format + * @DP_COLORIMETRY_BT2020_RGB: ITU-R BT.2020 R' G' B' colorimetry format + * @DP_COLORIMETRY_BT2020_CYCC: ITU-R BT.2020 Y'c C'bc C'rc colorimetry format + * @DP_COLORIMETRY_BT2020_YCC: ITU-R BT.2020 Y' C'b C'r colorimetry format + */ +enum dp_colorimetry { + DP_COLORIMETRY_DEFAULT = 0, + DP_COLORIMETRY_RGB_WIDE_FIXED = 0x1, + DP_COLORIMETRY_BT709_YCC = 0x1, + DP_COLORIMETRY_RGB_WIDE_FLOAT = 0x2, + DP_COLORIMETRY_XVYCC_601 = 0x2, + DP_COLORIMETRY_OPRGB = 0x3, + DP_COLORIMETRY_XVYCC_709 = 0x3, + DP_COLORIMETRY_DCI_P3_RGB = 0x4, + DP_COLORIMETRY_SYCC_601 = 0x4, + DP_COLORIMETRY_RGB_CUSTOM = 0x5, + DP_COLORIMETRY_OPYCC_601 = 0x5, + DP_COLORIMETRY_BT2020_RGB = 0x6, + DP_COLORIMETRY_BT2020_CYCC = 0x6, + DP_COLORIMETRY_BT2020_YCC = 0x7, +}; + +/** + * enum dp_dynamic_range - drm DP Dynamic Range + * + * This enum is used to indicate DP VSC SDP Dynamic Range. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * + * @DP_DYNAMIC_RANGE_VESA: VESA range + * @DP_DYNAMIC_RANGE_CTA: CTA range + */ +enum dp_dynamic_range { + DP_DYNAMIC_RANGE_VESA = 0, + DP_DYNAMIC_RANGE_CTA = 1, +}; + +/** + * enum dp_content_type - drm DP Content Type + * + * This enum is used to indicate DP VSC SDP Content Types. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * CTA-861-G defines content types and expected processing by a sink device + * + * @DP_CONTENT_TYPE_NOT_DEFINED: Not defined type + * @DP_CONTENT_TYPE_GRAPHICS: Graphics type + * @DP_CONTENT_TYPE_PHOTO: Photo type + * @DP_CONTENT_TYPE_VIDEO: Video type + * @DP_CONTENT_TYPE_GAME: Game type + */ +enum dp_content_type { + DP_CONTENT_TYPE_NOT_DEFINED = 0x00, + DP_CONTENT_TYPE_GRAPHICS = 0x01, + DP_CONTENT_TYPE_PHOTO = 0x02, + DP_CONTENT_TYPE_VIDEO = 0x03, + DP_CONTENT_TYPE_GAME = 0x04, +}; + +#endif /* _DRM_DP_H_ */ diff --git a/include/drm/display/drm_dp_aux_bus.h b/include/drm/display/drm_dp_aux_bus.h new file mode 100644 index 000000000..8a0a48638 --- /dev/null +++ b/include/drm/display/drm_dp_aux_bus.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2021 Google Inc. + * + * The DP AUX bus is used for devices that are connected over a DisplayPort + * AUX bus. The devices on the far side of the bus are referred to as + * endpoints in this code. + */ + +#ifndef _DP_AUX_BUS_H_ +#define _DP_AUX_BUS_H_ + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +/** + * struct dp_aux_ep_device - Main dev structure for DP AUX endpoints + * + * This is used to instantiate devices that are connected via a DP AUX + * bus. Usually the device is a panel, but conceivable other devices could + * be hooked up there. 
+ */ +struct dp_aux_ep_device { + /** @dev: The normal dev pointer */ + struct device dev; + /** @aux: Pointer to the aux bus */ + struct drm_dp_aux *aux; +}; + +struct dp_aux_ep_driver { + int (*probe)(struct dp_aux_ep_device *aux_ep); + void (*remove)(struct dp_aux_ep_device *aux_ep); + void (*shutdown)(struct dp_aux_ep_device *aux_ep); + struct device_driver driver; +}; + +static inline struct dp_aux_ep_device *to_dp_aux_ep_dev(struct device *dev) +{ + return container_of(dev, struct dp_aux_ep_device, dev); +} + +static inline struct dp_aux_ep_driver *to_dp_aux_ep_drv(struct device_driver *drv) +{ + return container_of(drv, struct dp_aux_ep_driver, driver); +} + +int of_dp_aux_populate_bus(struct drm_dp_aux *aux, + int (*done_probing)(struct drm_dp_aux *aux)); +void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux); +int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux, + int (*done_probing)(struct drm_dp_aux *aux)); + +/* Deprecated versions of the above functions. To be removed when no callers. */ +static inline int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux) +{ + int ret; + + ret = of_dp_aux_populate_bus(aux, NULL); + + /* New API returns -ENODEV for no child case; adapt to old assumption */ + return (ret != -ENODEV) ? ret : 0; +} + +static inline int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux) +{ + int ret; + + ret = devm_of_dp_aux_populate_bus(aux, NULL); + + /* New API returns -ENODEV for no child case; adapt to old assumption */ + return (ret != -ENODEV) ? ret : 0; +} + +static inline void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux) +{ + of_dp_aux_depopulate_bus(aux); +} + +#define dp_aux_dp_driver_register(aux_ep_drv) \ + __dp_aux_dp_driver_register(aux_ep_drv, THIS_MODULE) +int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *aux_ep_drv, + struct module *owner); +void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *aux_ep_drv); + +#endif /* _DP_AUX_BUS_H_ */ diff --git a/include/drm/display/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h new file mode 100644 index 000000000..7ee482265 --- /dev/null +++ b/include/drm/display/drm_dp_dual_mode_helper.h @@ -0,0 +1,121 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef DRM_DP_DUAL_MODE_HELPER_H +#define DRM_DP_DUAL_MODE_HELPER_H + +#include <linux/types.h> + +/* + * Optional for type 1 DVI adaptors + * Mandatory for type 1 HDMI and type 2 adaptors + */ +#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */ +#define DP_DUAL_MODE_HDMI_ID_LEN 16 +/* + * Optional for type 1 adaptors + * Mandatory for type 2 adaptors + */ +#define DP_DUAL_MODE_ADAPTOR_ID 0x10 +#define DP_DUAL_MODE_REV_MASK 0x07 +#define DP_DUAL_MODE_REV_TYPE2 0x00 +#define DP_DUAL_MODE_TYPE_MASK 0xf0 +#define DP_DUAL_MODE_TYPE_TYPE2 0xa0 +/* This field is marked reserved in dual mode spec, used in LSPCON */ +#define DP_DUAL_MODE_TYPE_HAS_DPCD 0x08 +#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/ +#define DP_DUAL_IEEE_OUI_LEN 3 +#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */ +#define DP_DUAL_DEVICE_ID_LEN 6 +#define DP_DUAL_MODE_HARDWARE_REV 0x1a +#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b +#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c +#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d +#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e +#define DP_DUAL_MODE_TMDS_OEN 0x20 +#define DP_DUAL_MODE_TMDS_DISABLE 0x01 +#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21 +#define DP_DUAL_MODE_CEC_ENABLE 0x01 +#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22 + +/* LSPCON specific registers, defined by MCA */ +#define DP_DUAL_MODE_LSPCON_MODE_CHANGE 0x40 +#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41 +#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1 + +struct drm_device; +struct i2c_adapter; + +ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, + u8 offset, void *buffer, size_t size); +ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, + u8 offset, const void *buffer, size_t size); + +/** + * enum drm_lspcon_mode + * @DRM_LSPCON_MODE_INVALID: No LSPCON. + * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON + * which drives DP++ to HDMI 1.4 conversion. + * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON + * which drives DP++ to HDMI 2.0 active conversion. 
+ */ +enum drm_lspcon_mode { + DRM_LSPCON_MODE_INVALID, + DRM_LSPCON_MODE_LS, + DRM_LSPCON_MODE_PCON, +}; + +/** + * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor + * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor + * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor + * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter + */ +enum drm_dp_dual_mode_type { + DRM_DP_DUAL_MODE_NONE, + DRM_DP_DUAL_MODE_UNKNOWN, + DRM_DP_DUAL_MODE_TYPE1_DVI, + DRM_DP_DUAL_MODE_TYPE1_HDMI, + DRM_DP_DUAL_MODE_TYPE2_DVI, + DRM_DP_DUAL_MODE_TYPE2_HDMI, + DRM_DP_DUAL_MODE_LSPCON, +}; + +enum drm_dp_dual_mode_type +drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter); +int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter); +int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool *enabled); +int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool enable); +const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); + +int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter, + enum drm_lspcon_mode *current_mode); +int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter, + enum drm_lspcon_mode reqd_mode); +#endif diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h new file mode 100644 index 000000000..ade9df59e --- /dev/null +++ b/include/drm/display/drm_dp_helper.h @@ -0,0 +1,765 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef _DRM_DP_HELPER_H_ +#define _DRM_DP_HELPER_H_ + +#include <linux/delay.h> +#include <linux/i2c.h> + +#include <drm/display/drm_dp.h> +#include <drm/drm_connector.h> + +struct drm_device; +struct drm_dp_aux; +struct drm_panel; + +bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); + +int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr); +int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr); + +void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_lttpr_link_train_clock_recovery_delay(void); +void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); + +int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux); +bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]); + +u8 drm_dp_link_rate_to_bw_code(int link_rate); +int drm_dp_bw_code_to_link_rate(u8 link_bw); + +const char *drm_dp_phy_name(enum drm_dp_phy dp_phy); + +/** + * struct drm_dp_vsc_sdp - drm DP VSC SDP + * + * This structure represents a DP VSC SDP of drm + * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and + * [Table 2-117: VSC SDP Payload for DB16 through DB18] + * + * @sdp_type: secondary-data packet type + * @revision: revision number + * @length: number of valid data bytes + * @pixelformat: pixel encoding format + * @colorimetry: colorimetry format + * @bpc: bit per color + * @dynamic_range: dynamic range information + * @content_type: CTA-861-G defines content types and expected processing by a sink device + */ +struct drm_dp_vsc_sdp { + unsigned char sdp_type; + unsigned char revision; + unsigned char length; + enum dp_pixelformat pixelformat; + enum dp_colorimetry colorimetry; + int bpc; + enum dp_dynamic_range dynamic_range; + enum dp_content_type content_type; +}; + +void drm_dp_vsc_sdp_log(const char *level, struct device *dev, + const struct drm_dp_vsc_sdp *vsc); + +int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); + +static inline int +drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); +} + +static inline u8 +drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; +} + +static inline bool +drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 && + 
(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); +} + +static inline bool +drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 && + (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); +} + +static inline bool +drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x12 && + dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; +} + +static inline bool +drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 || + dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5; +} + +static inline bool +drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x14 && + dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED; +} + +static inline u8 +drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 : + DP_TRAINING_PATTERN_MASK; +} + +static inline bool +drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; +} + +/* DP/eDP DSC support */ +u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + bool is_edp); +u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); +int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], + u8 dsc_bpc[3]); + +static inline bool +drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & + DP_DSC_DECOMPRESSION_IS_SUPPORTED; +} + +static inline u16 +drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | + ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & + DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) << 8); +} + +static inline u32 +drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + /* Max Slicewidth = Number of Pixels * 320 */ + return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * + DP_DSC_SLICE_WIDTH_MULTIPLIER; +} + +/* Forward Error Correction Support on DP 1.4 */ +static inline bool +drm_dp_sink_supports_fec(const u8 fec_capable) +{ + return fec_capable & DP_FEC_CAPABLE; +} + +static inline bool +drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B; +} + +static inline bool +drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_EDP_CONFIGURATION_CAP] & + DP_ALTERNATE_SCRAMBLER_RESET_CAP; +} + +/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */ +static inline bool +drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWN_STREAM_PORT_COUNT] & + DP_MSA_TIMING_PAR_IGNORED; +} + +/** + * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support + * @edp_dpcd: The DPCD to check + * + * Note that currently this function will return %false for panels which support various DPCD + * backlight features but which require the brightness be set through PWM, and don't support setting + * the brightness level via the DPCD. 
+ * + * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false + * otherwise + */ +static inline bool +drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) +{ + return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); +} + +/* + * DisplayPort AUX channel + */ + +/** + * struct drm_dp_aux_msg - DisplayPort AUX channel transaction + * @address: address of the (first) register to access + * @request: contains the type of transaction (see DP_AUX_* macros) + * @reply: upon completion, contains the reply type of the transaction + * @buffer: pointer to a transmission or reception buffer + * @size: size of @buffer + */ +struct drm_dp_aux_msg { + unsigned int address; + u8 request; + u8 reply; + void *buffer; + size_t size; +}; + +struct cec_adapter; +struct edid; +struct drm_connector; + +/** + * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX + * @lock: mutex protecting this struct + * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. + * @connector: the connector this CEC adapter is associated with + * @unregister_work: unregister the CEC adapter + */ +struct drm_dp_aux_cec { + struct mutex lock; + struct cec_adapter *adap; + struct drm_connector *connector; + struct delayed_work unregister_work; +}; + +/** + * struct drm_dp_aux - DisplayPort AUX channel + * + * An AUX channel can also be used to transport I2C messages to a sink. A + * typical application of that is to access an EDID that's present in the sink + * device. The @transfer() function can also be used to execute such + * transactions. The drm_dp_aux_register() function registers an I2C adapter + * that can be passed to drm_probe_ddc(). Upon removal, drivers should call + * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long + * transfers by default; if a partial response is received, the adapter will + * drop down to the size given by the partial response for this transaction + * only. + */ +struct drm_dp_aux { + /** + * @name: user-visible name of this AUX channel and the + * I2C-over-AUX adapter. + * + * It's also used to specify the name of the I2C adapter. If set + * to %NULL, dev_name() of @dev will be used. + */ + const char *name; + + /** + * @ddc: I2C adapter that can be used for I2C-over-AUX + * communication + */ + struct i2c_adapter ddc; + + /** + * @dev: pointer to struct device that is the parent for this + * AUX channel. + */ + struct device *dev; + + /** + * @drm_dev: pointer to the &drm_device that owns this AUX channel. + * Beware, this may be %NULL before drm_dp_aux_register() has been + * called. + * + * It should be set to the &drm_device that will be using this AUX + * channel as early as possible. For many graphics drivers this should + * happen before drm_dp_aux_init(), however it's perfectly fine to set + * this field later so long as it's assigned before calling + * drm_dp_aux_register(). + */ + struct drm_device *drm_dev; + + /** + * @crtc: backpointer to the crtc that is currently using this + * AUX channel + */ + struct drm_crtc *crtc; + + /** + * @hw_mutex: internal mutex used for locking transfers. + * + * Note that if the underlying hardware is shared among multiple + * channels, the driver needs to do additional locking to + * prevent concurrent access. 
+ */ + struct mutex hw_mutex; + + /** + * @crc_work: worker that captures CRCs for each frame + */ + struct work_struct crc_work; + + /** + * @crc_count: counter of captured frame CRCs + */ + u8 crc_count; + + /** + * @transfer: transfers a message representing a single AUX + * transaction. + * + * This is a hardware-specific implementation of how + * transactions are executed that the drivers must provide. + * + * A pointer to a &drm_dp_aux_msg structure describing the + * transaction is passed into this function. Upon success, the + * implementation should return the number of payload bytes that + * were transferred, or a negative error-code on failure. + * + * Helpers will propagate these errors, with the exception of + * the %-EBUSY error, which causes a transaction to be retried. + * On a short, helpers will return %-EPROTO to make it simpler + * to check for failure. + * + * The @transfer() function must only modify the reply field of + * the &drm_dp_aux_msg structure. The retry logic and i2c + * helpers assume this is the case. + * + * Also note that this callback can be called no matter the + * state @dev is in and also no matter what state the panel is + * in. It's expected: + * + * - If the @dev providing the AUX bus is currently unpowered then + * it will power itself up for the transfer. + * + * - If we're on eDP (using a drm_panel) and the panel is not in a + * state where it can respond (it's not powered or it's in a + * low power state) then this function may return an error, but + * not crash. It's up to the caller of this code to make sure that + * the panel is powered on if getting an error back is not OK. If a + * drm_panel driver is initiating a DP AUX transfer it may power + * itself up however it wants. All other code should ensure that + * the pre_enable() bridge chain (which eventually calls the + * drm_panel prepare function) has powered the panel. + */ + ssize_t (*transfer)(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg); + + /** + * @wait_hpd_asserted: wait for HPD to be asserted + * + * This is mainly useful for eDP panels drivers to wait for an eDP + * panel to finish powering on. This is an optional function. + * + * This function will efficiently wait for the HPD signal to be + * asserted. The `wait_us` parameter that is passed in says that we + * know that the HPD signal is expected to be asserted within `wait_us` + * microseconds. This function could wait for longer than `wait_us` if + * the logic in the DP controller has a long debouncing time. The + * important thing is that if this function returns success that the + * DP controller is ready to send AUX transactions. + * + * This function returns 0 if HPD was asserted or -ETIMEDOUT if time + * expired and HPD wasn't asserted. This function should not print + * timeout errors to the log. + * + * The semantics of this function are designed to match the + * readx_poll_timeout() function. That means a `wait_us` of 0 means + * to wait forever. Like readx_poll_timeout(), this function may sleep. + * + * NOTE: this function specifically reports the state of the HPD pin + * that's associated with the DP AUX channel. This is different from + * the HPD concept in much of the rest of DRM which is more about + * physical presence of a display. For eDP, for instance, a display is + * assumed always present even if the HPD pin is deasserted. + */ + int (*wait_hpd_asserted)(struct drm_dp_aux *aux, unsigned long wait_us); + + /** + * @i2c_nack_count: Counts I2C NACKs, used for DP validation. 
+ */ + unsigned i2c_nack_count; + /** + * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. + */ + unsigned i2c_defer_count; + /** + * @cec: struct containing fields used for CEC-Tunneling-over-AUX. + */ + struct drm_dp_aux_cec cec; + /** + * @is_remote: Is this AUX CH actually using sideband messaging. + */ + bool is_remote; +}; + +int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset); +ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); +ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); + +/** + * drm_dp_dpcd_readb() - read a single byte from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to read + * @valuep: location where the value of the register will be stored + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, + unsigned int offset, u8 *valuep) +{ + return drm_dp_dpcd_read(aux, offset, valuep, 1); +} + +/** + * drm_dp_dpcd_writeb() - write a single byte to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to write + * @value: value to write to the register + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, + unsigned int offset, u8 value) +{ + return drm_dp_dpcd_write(aux, offset, &value, 1); +} + +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]); + +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, + u8 status[DP_LINK_STATUS_SIZE]); + +int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, + enum drm_dp_phy dp_phy, + u8 link_status[DP_LINK_STATUS_SIZE]); + +bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, + u8 real_edid_checksum); + +int drm_dp_read_downstream_info(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 type); +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); +void drm_dp_downstream_debug(struct seq_file *m, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid, + struct drm_dp_aux *aux); +enum drm_mode_subconnector +drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +void drm_dp_set_subconnector_property(struct drm_connector 
*connector, + enum drm_connector_status status, + const u8 *dpcd, + const u8 port_cap[4]); + +struct drm_dp_desc; +bool drm_dp_read_sink_count_cap(struct drm_connector *connector, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const struct drm_dp_desc *desc); +int drm_dp_read_sink_count(struct drm_dp_aux *aux); + +int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); +int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, + u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]); +int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); +int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]); +bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); + +void drm_dp_remote_aux_init(struct drm_dp_aux *aux); +void drm_dp_aux_init(struct drm_dp_aux *aux); +int drm_dp_aux_register(struct drm_dp_aux *aux); +void drm_dp_aux_unregister(struct drm_dp_aux *aux); + +int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); +int drm_dp_stop_crc(struct drm_dp_aux *aux); + +struct drm_dp_dpcd_ident { + u8 oui[3]; + u8 device_id[6]; + u8 hw_rev; + u8 sw_major_rev; + u8 sw_minor_rev; +} __packed; + +/** + * struct drm_dp_desc - DP branch/sink device descriptor + * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). + * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. + */ +struct drm_dp_desc { + struct drm_dp_dpcd_ident ident; + u32 quirks; +}; + +int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, + bool is_branch); + +/** + * enum drm_dp_quirk - Display Port sink/branch device specific quirks + * + * Display Port sink and branch devices in the wild have a variety of bugs, try + * to collect them here. The quirks are shared, but it's up to the drivers to + * implement workarounds for them. + */ +enum drm_dp_quirk { + /** + * @DP_DPCD_QUIRK_CONSTANT_N: + * + * The device requires main link attributes Mvid and Nvid to be limited + * to 16 bits. So will give a constant value (0x8000) for compatability. + */ + DP_DPCD_QUIRK_CONSTANT_N, + /** + * @DP_DPCD_QUIRK_NO_PSR: + * + * The device does not support PSR even if reports that it supports or + * driver still need to implement proper handling for such device. + */ + DP_DPCD_QUIRK_NO_PSR, + /** + * @DP_DPCD_QUIRK_NO_SINK_COUNT: + * + * The device does not set SINK_COUNT to a non-zero value. + * The driver should ignore SINK_COUNT during detection. Note that + * drm_dp_read_sink_count_cap() automatically checks for this quirk. + */ + DP_DPCD_QUIRK_NO_SINK_COUNT, + /** + * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD: + * + * The device supports MST DSC despite not supporting Virtual DPCD. + * The DSC caps can be read from the physical aux instead. + */ + DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD, + /** + * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS: + * + * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite + * the DP_MAX_LINK_RATE register reporting a lower max multiplier. 
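A short sketch of the common probe-time pattern built from the helpers declared above: cache the receiver capability block with drm_dp_read_dpcd_caps() and fetch the current lane status with drm_dp_dpcd_read_link_status(). Error handling is trimmed and the function name is illustrative.

static int foo_dp_probe_sink(struct drm_dp_aux *aux)
{
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
        u8 link_status[DP_LINK_STATUS_SIZE];
        int ret;

        /* Cached copy of the receiver capability block (and extended caps). */
        ret = drm_dp_read_dpcd_caps(aux, dpcd);
        if (ret < 0)
                return ret;

        /* Per-lane link status, e.g. to decide whether to retrain. */
        ret = drm_dp_dpcd_read_link_status(aux, link_status);
        if (ret < 0)
                return ret;

        return 0;
}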
+ */ + DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, +}; + +/** + * drm_dp_has_quirk() - does the DP device have a specific quirk + * @desc: Device descriptor filled by drm_dp_read_desc() + * @quirk: Quirk to query for + * + * Return true if DP device identified by @desc has @quirk. + */ +static inline bool +drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) +{ + return desc->quirks & BIT(quirk); +} + +/** + * struct drm_edp_backlight_info - Probed eDP backlight info struct + * @pwmgen_bit_count: The pwmgen bit count + * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any + * @max: The maximum backlight level that may be set + * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register? + * @aux_enable: Does the panel support the AUX enable cap? + * @aux_set: Does the panel support setting the brightness through AUX? + * + * This structure contains various data about an eDP backlight, which can be populated by using + * drm_edp_backlight_init(). + */ +struct drm_edp_backlight_info { + u8 pwmgen_bit_count; + u8 pwm_freq_pre_divider; + u16 max; + + bool lsb_reg_used : 1; + bool aux_enable : 1; + bool aux_set : 1; +}; + +int +drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, + u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE], + u16 *current_level, u8 *current_mode); +int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + u16 level); +int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, + u16 level); +int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl); + +#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ + (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) + +int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux); + +#else + +static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel, + struct drm_dp_aux *aux) +{ + return 0; +} + +#endif + +#ifdef CONFIG_DRM_DP_CEC +void drm_dp_cec_irq(struct drm_dp_aux *aux); +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, + struct drm_connector *connector); +void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); +void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); +#else +static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) +{ +} + +static inline void +drm_dp_cec_register_connector(struct drm_dp_aux *aux, + struct drm_connector *connector) +{ +} + +static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, + const struct edid *edid) +{ +} + +static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) +{ +} + +#endif + +/** + * struct drm_dp_phy_test_params - DP Phy Compliance parameters + * @link_rate: Requested Link rate from DPCD 0x219 + * @num_lanes: Number of lanes requested by sing through DPCD 0x220 + * @phy_pattern: DP Phy test pattern from DPCD 0x248 + * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DCPD 0x24A and 0x24B + * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259 + * @enhanced_frame_cap: flag for enhanced frame capability. 
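A sketch of the AUX backlight bring-up flow using drm_edp_backlight_init() and drm_edp_backlight_enable() declared above. The edp_dpcd buffer is assumed to have been read as in the earlier sketch, and a driver PWM frequency of 0 simply keeps the panel's existing setting; the function name is illustrative.

static int foo_edp_backlight_setup(struct drm_dp_aux *aux,
                                   const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
{
        struct drm_edp_backlight_info bl;
        u16 current_level;
        u8 current_mode;
        int ret;

        /* A PWM frequency of 0 keeps whatever the TCON already uses. */
        ret = drm_edp_backlight_init(aux, &bl, 0, edp_dpcd,
                                     &current_level, &current_mode);
        if (ret < 0)
                return ret;

        /* Switch the panel backlight on at its maximum brightness. */
        return drm_edp_backlight_enable(aux, &bl, bl.max);
}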
+ */ +struct drm_dp_phy_test_params { + int link_rate; + u8 num_lanes; + u8 phy_pattern; + u8 hbr2_reset[2]; + u8 custom80[10]; + bool enhanced_frame_cap; +}; + +int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data); +int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data, u8 dp_rev); +int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd); +bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux); +int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, + u8 frl_mode); +int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask, + u8 frl_type); +int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux); +int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux); + +bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux); +int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask); +void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux, + struct drm_connector *connector); +bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]); +int drm_dp_pcon_pps_default(struct drm_dp_aux *aux); +int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]); +int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]); +bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 color_spc); +int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); + +#endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h new file mode 100644 index 000000000..40e855c84 --- /dev/null +++ b/include/drm/display/drm_dp_mst_helper.h @@ -0,0 +1,1014 @@ +/* + * Copyright © 2014 Red Hat. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
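A sketch of how a source driver might service a PHY compliance request with the two helpers declared just above: read the requested parameters from the sink, reprogram the link accordingly (driver-specific, elided here), then latch the requested pattern. dp_rev would be the sink's DPCD revision byte; the function name is illustrative.

static int foo_dp_handle_phy_test(struct drm_dp_aux *aux, u8 dp_rev)
{
        struct drm_dp_phy_test_params data = {};
        int ret;

        /* Pull link rate, lane count and pattern from DPCD 0x219/0x220/0x248. */
        ret = drm_dp_get_phy_test_pattern(aux, &data);
        if (ret < 0)
                return ret;

        /*
         * Driver-specific: retrain the link at data.link_rate with
         * data.num_lanes lanes and start emitting data.phy_pattern.
         */

        /* Acknowledge the request back to the sink. */
        return drm_dp_set_phy_test_pattern(aux, &data, dp_rev);
}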
+ */ +#ifndef _DRM_DP_MST_HELPER_H_ +#define _DRM_DP_MST_HELPER_H_ + +#include <linux/types.h> +#include <drm/display/drm_dp_helper.h> +#include <drm/drm_atomic.h> + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) +#include <linux/stackdepot.h> +#include <linux/timekeeping.h> + +enum drm_dp_mst_topology_ref_type { + DRM_DP_MST_TOPOLOGY_REF_GET, + DRM_DP_MST_TOPOLOGY_REF_PUT, +}; + +struct drm_dp_mst_topology_ref_history { + struct drm_dp_mst_topology_ref_entry { + enum drm_dp_mst_topology_ref_type type; + int count; + ktime_t ts_nsec; + depot_stack_handle_t backtrace; + } *entries; + int len; +}; +#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */ + +struct drm_dp_mst_branch; + +/** + * struct drm_dp_mst_port - MST port + * @port_num: port number + * @input: if this port is an input port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @mcs: message capability status - DP 1.2 spec. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @pdt: Peer Device Type. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @ldps: Legacy Device Plug Status. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @dpcd_rev: DPCD revision of device on this port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @num_sdp_streams: Number of simultaneous streams. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @num_sdp_stream_sinks: Number of stream sinks. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @full_pbn: Max possible bandwidth for this port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @next: link to next port on this branch device + * @aux: i2c aux transport to talk to device connected to this port, protected + * by &drm_dp_mst_topology_mgr.base.lock. + * @passthrough_aux: parent aux to which DSC pass-through requests should be + * sent, only set if DSC pass-through is possible. + * @parent: branch device parent of this port + * @vcpi: Virtual Channel Payload info for this port. + * @connector: DRM connector this port is connected to. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @mgr: topology manager this port lives under. + * + * This structure represents an MST port endpoint on a device somewhere + * in the MST topology. + */ +struct drm_dp_mst_port { + /** + * @topology_kref: refcount for this port's lifetime in the topology, + * only the DP MST helpers should need to touch this + */ + struct kref topology_kref; + + /** + * @malloc_kref: refcount for the memory allocation containing this + * structure. See drm_dp_mst_get_port_malloc() and + * drm_dp_mst_put_port_malloc(). + */ + struct kref malloc_kref; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history: A history of each topology + * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. + */ + struct drm_dp_mst_topology_ref_history topology_ref_history; +#endif + + u8 port_num; + bool input; + bool mcs; + bool ddps; + u8 pdt; + bool ldps; + u8 dpcd_rev; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + uint16_t full_pbn; + struct list_head next; + /** + * @mstb: the branch device connected to this port, if there is one. + * This should be considered protected for reading by + * &drm_dp_mst_topology_mgr.lock. 
There are two exceptions to this: + * &drm_dp_mst_topology_mgr.up_req_work and + * &drm_dp_mst_topology_mgr.work, which do not grab + * &drm_dp_mst_topology_mgr.lock during reads but are the only + * updaters of this list and are protected from writing concurrently + * by &drm_dp_mst_topology_mgr.probe_lock. + */ + struct drm_dp_mst_branch *mstb; + struct drm_dp_aux aux; /* i2c bus for this port? */ + struct drm_dp_aux *passthrough_aux; + struct drm_dp_mst_branch *parent; + + struct drm_connector *connector; + struct drm_dp_mst_topology_mgr *mgr; + + /** + * @cached_edid: for DP logical ports - make tiling work by ensuring + * that the EDID for all connectors is read immediately. + */ + struct edid *cached_edid; + /** + * @has_audio: Tracks whether the sink connector to this port is + * audio-capable. + */ + bool has_audio; + + /** + * @fec_capable: bool indicating if FEC can be supported up to that + * point in the MST topology. + */ + bool fec_capable; +}; + +/* sideband msg header - not bit struct */ +struct drm_dp_sideband_msg_hdr { + u8 lct; + u8 lcr; + u8 rad[8]; + bool broadcast; + bool path_msg; + u8 msg_len; + bool somt; + bool eomt; + bool seqno; +}; + +struct drm_dp_sideband_msg_rx { + u8 chunk[48]; + u8 msg[256]; + u8 curchunk_len; + u8 curchunk_idx; /* chunk we are parsing now */ + u8 curchunk_hdrlen; + u8 curlen; /* total length of the msg */ + bool have_somt; + bool have_eomt; + struct drm_dp_sideband_msg_hdr initial_hdr; +}; + +/** + * struct drm_dp_mst_branch - MST branch device. + * @rad: Relative Address to talk to this branch device. + * @lct: Link count total to talk to this branch device. + * @num_ports: number of ports on the branch. + * @port_parent: pointer to the port parent, NULL if toplevel. + * @mgr: topology manager for this branch device. + * @link_address_sent: if a link address message has been sent to this device yet. + * @guid: guid for DP 1.2 branch device. port under this branch can be + * identified by port #. + * + * This structure represents an MST branch device, there is one + * primary branch device at the root, along with any other branches connected + * to downstream port of parent branches. + */ +struct drm_dp_mst_branch { + /** + * @topology_kref: refcount for this branch device's lifetime in the + * topology, only the DP MST helpers should need to touch this + */ + struct kref topology_kref; + + /** + * @malloc_kref: refcount for the memory allocation containing this + * structure. See drm_dp_mst_get_mstb_malloc() and + * drm_dp_mst_put_mstb_malloc(). + */ + struct kref malloc_kref; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history: A history of each topology + * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. + */ + struct drm_dp_mst_topology_ref_history topology_ref_history; +#endif + + /** + * @destroy_next: linked-list entry used by + * drm_dp_delayed_destroy_work() + */ + struct list_head destroy_next; + + u8 rad[8]; + u8 lct; + int num_ports; + + /** + * @ports: the list of ports on this branch device. This should be + * considered protected for reading by &drm_dp_mst_topology_mgr.lock. 
+ * There are two exceptions to this: + * &drm_dp_mst_topology_mgr.up_req_work and + * &drm_dp_mst_topology_mgr.work, which do not grab + * &drm_dp_mst_topology_mgr.lock during reads but are the only + * updaters of this list and are protected from updating the list + * concurrently by @drm_dp_mst_topology_mgr.probe_lock + */ + struct list_head ports; + + struct drm_dp_mst_port *port_parent; + struct drm_dp_mst_topology_mgr *mgr; + + bool link_address_sent; + + /* global unique identifier to identify branch devices */ + u8 guid[16]; +}; + + +struct drm_dp_nak_reply { + u8 guid[16]; + u8 reason; + u8 nak_data; +}; + +struct drm_dp_link_address_ack_reply { + u8 guid[16]; + u8 nports; + struct drm_dp_link_addr_reply_port { + bool input_port; + u8 peer_device_type; + u8 port_number; + bool mcs; + bool ddps; + bool legacy_device_plug_status; + u8 dpcd_revision; + u8 peer_guid[16]; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + } ports[16]; +}; + +struct drm_dp_remote_dpcd_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_dpcd_write_ack_reply { + u8 port_number; +}; + +struct drm_dp_remote_dpcd_write_nak_reply { + u8 port_number; + u8 reason; + u8 bytes_written_before_failure; +}; + +struct drm_dp_remote_i2c_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_i2c_read_nak_reply { + u8 port_number; + u8 nak_reason; + u8 i2c_nak_transaction; +}; + +struct drm_dp_remote_i2c_write_ack_reply { + u8 port_number; +}; + +struct drm_dp_query_stream_enc_status_ack_reply { + /* Bit[23:16]- Stream Id */ + u8 stream_id; + + /* Bit[15]- Signed */ + bool reply_signed; + + /* Bit[10:8]- Stream Output Sink Type */ + bool unauthorizable_device_present; + bool legacy_device_present; + bool query_capable_device_present; + + /* Bit[12:11]- Stream Output CP Type */ + bool hdcp_1x_device_present; + bool hdcp_2x_device_present; + + /* Bit[4]- Stream Authentication */ + bool auth_completed; + + /* Bit[3]- Stream Encryption */ + bool encryption_enabled; + + /* Bit[2]- Stream Repeater Function Present */ + bool repeater_present; + + /* Bit[1:0]- Stream State */ + u8 state; +}; + +#define DRM_DP_MAX_SDP_STREAMS 16 +struct drm_dp_allocate_payload { + u8 port_number; + u8 number_sdp_streams; + u8 vcpi; + u16 pbn; + u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS]; +}; + +struct drm_dp_allocate_payload_ack_reply { + u8 port_number; + u8 vcpi; + u16 allocated_pbn; +}; + +struct drm_dp_connection_status_notify { + u8 guid[16]; + u8 port_number; + bool legacy_device_plug_status; + bool displayport_device_plug_status; + bool message_capability_status; + bool input_port; + u8 peer_device_type; +}; + +struct drm_dp_remote_dpcd_read { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; +}; + +struct drm_dp_remote_dpcd_write { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; + u8 *bytes; +}; + +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 +struct drm_dp_remote_i2c_read { + u8 num_transactions; + u8 port_number; + struct drm_dp_remote_i2c_read_tx { + u8 i2c_dev_id; + u8 num_bytes; + u8 *bytes; + u8 no_stop_bit; + u8 i2c_transaction_delay; + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; + u8 read_i2c_device_id; + u8 num_bytes_read; +}; + +struct drm_dp_remote_i2c_write { + u8 port_number; + u8 write_i2c_device_id; + u8 num_bytes; + u8 *bytes; +}; + +struct drm_dp_query_stream_enc_status { + u8 stream_id; + u8 client_id[7]; /* 56-bit nonce */ + u8 stream_event; + bool valid_stream_event; + u8 stream_behavior; + u8 
valid_stream_behavior; +}; + +/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_req { + u8 port_number; +}; + +struct drm_dp_enum_path_resources_ack_reply { + u8 port_number; + bool fec_capable; + u16 full_payload_bw_number; + u16 avail_payload_bw_number; +}; + +/* covers POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_rep { + u8 port_number; +}; + +struct drm_dp_query_payload { + u8 port_number; + u8 vcpi; +}; + +struct drm_dp_resource_status_notify { + u8 port_number; + u8 guid[16]; + u16 available_pbn; +}; + +struct drm_dp_query_payload_ack_reply { + u8 port_number; + u16 allocated_pbn; +}; + +struct drm_dp_sideband_msg_req_body { + u8 req_type; + union ack_req { + struct drm_dp_connection_status_notify conn_stat; + struct drm_dp_port_number_req port_num; + struct drm_dp_resource_status_notify resource_stat; + + struct drm_dp_query_payload query_payload; + struct drm_dp_allocate_payload allocate_payload; + + struct drm_dp_remote_dpcd_read dpcd_read; + struct drm_dp_remote_dpcd_write dpcd_write; + + struct drm_dp_remote_i2c_read i2c_read; + struct drm_dp_remote_i2c_write i2c_write; + + struct drm_dp_query_stream_enc_status enc_status; + } u; +}; + +struct drm_dp_sideband_msg_reply_body { + u8 reply_type; + u8 req_type; + union ack_replies { + struct drm_dp_nak_reply nak; + struct drm_dp_link_address_ack_reply link_addr; + struct drm_dp_port_number_rep port_number; + + struct drm_dp_enum_path_resources_ack_reply path_resources; + struct drm_dp_allocate_payload_ack_reply allocate_payload; + struct drm_dp_query_payload_ack_reply query_payload; + + struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack; + struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack; + struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack; + + struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; + struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; + struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; + + struct drm_dp_query_stream_enc_status_ack_reply enc_status; + } u; +}; + +/* msg is queued to be put into a slot */ +#define DRM_DP_SIDEBAND_TX_QUEUED 0 +/* msg has started transmitting on a slot - still on msgq */ +#define DRM_DP_SIDEBAND_TX_START_SEND 1 +/* msg has finished transmitting on a slot - removed from msgq only in slot */ +#define DRM_DP_SIDEBAND_TX_SENT 2 +/* msg has received a response - removed from slot */ +#define DRM_DP_SIDEBAND_TX_RX 3 +#define DRM_DP_SIDEBAND_TX_TIMEOUT 4 + +struct drm_dp_sideband_msg_tx { + u8 msg[256]; + u8 chunk[48]; + u8 cur_offset; + u8 cur_len; + struct drm_dp_mst_branch *dst; + struct list_head next; + int seqno; + int state; + bool path_msg; + struct drm_dp_sideband_msg_reply_body reply; +}; + +/* sideband msg handler */ +struct drm_dp_mst_topology_mgr; +struct drm_dp_mst_topology_cbs { + /* create a connector for a port */ + struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + /* + * Checks for any pending MST interrupts, passing them to MST core for + * processing, the same way an HPD IRQ pulse handler would do this. + * If provided MST core calls this callback from a poll-waiting loop + * when waiting for MST down message replies. The driver is expected + * to guard against a race between this callback and the driver's HPD + * IRQ pulse handler. 
+ */ + void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr); +}; + +#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) + +/** + * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload + * + * The primary atomic state structure for a given MST payload. Stores information like current + * bandwidth allocation, intended action for this payload, etc. + */ +struct drm_dp_mst_atomic_payload { + /** @port: The MST port assigned to this payload */ + struct drm_dp_mst_port *port; + + /** + * @vc_start_slot: The time slot that this payload starts on. Because payload start slots + * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic + * check time. This shouldn't usually matter, as the start slot should never be relevant for + * atomic state computations. + * + * Since this value is determined at commit time instead of check time, this value is + * protected by the MST helpers ensuring that async commits operating on the given topology + * never run in parallel. In the event that a driver does need to read this value (e.g. to + * inform hardware of the starting timeslot for a payload), the driver may either: + * + * * Read this field during the atomic commit after + * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the + * previous MST states payload start slots have been copied over to the new state. Note + * that a new start slot won't be assigned/removed from this payload until + * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called. + * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to + * get committed to hardware by calling drm_crtc_commit_wait() on each of the + * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps. + * + * If neither of the two above solutions suffice (e.g. the driver needs to read the start + * slot in the middle of an atomic commit without waiting for some reason), then drivers + * should cache this value themselves after changing payloads. + */ + s8 vc_start_slot; + + /** @vcpi: The Virtual Channel Payload Identifier */ + u8 vcpi; + /** + * @time_slots: + * The number of timeslots allocated to this payload from the source DP Tx to + * the immediate downstream DP Rx + */ + int time_slots; + /** @pbn: The payload bandwidth for this payload */ + int pbn; + + /** @delete: Whether or not we intend to delete this payload during this atomic commit */ + bool delete : 1; + /** @dsc_enabled: Whether or not this payload has DSC enabled */ + bool dsc_enabled : 1; + + /** @next: The list node for this payload */ + struct list_head next; +}; + +/** + * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state + * + * This struct represents the atomic state of the toplevel DisplayPort MST manager + */ +struct drm_dp_mst_topology_state { + /** @base: Base private state for atomic */ + struct drm_private_state base; + + /** @mgr: The topology manager */ + struct drm_dp_mst_topology_mgr *mgr; + + /** + * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may + * modify this to add additional dependencies if needed. + */ + u32 pending_crtc_mask; + /** + * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't + * populated until drm_dp_mst_atomic_wait_for_dependencies() is called. 
+ */ + struct drm_crtc_commit **commit_deps; + /** @num_commit_deps: The number of CRTC commits in @commit_deps */ + size_t num_commit_deps; + + /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */ + u32 payload_mask; + /** @payloads: The list of payloads being created/destroyed in this state */ + struct list_head payloads; + + /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */ + u8 total_avail_slots; + /** @start_slot: The first usable time slot in this topology (1 or 0) */ + u8 start_slot; + + /** + * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this + * out itself. + */ + int pbn_div; +}; + +#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) + +/** + * struct drm_dp_mst_topology_mgr - DisplayPort MST manager + * + * This struct represents the toplevel displayport MST topology manager. + * There should be one instance of this for every MST capable DP connector + * on the GPU. + */ +struct drm_dp_mst_topology_mgr { + /** + * @base: Base private object for atomic + */ + struct drm_private_obj base; + + /** + * @dev: device pointer for adding i2c devices etc. + */ + struct drm_device *dev; + /** + * @cbs: callbacks for connector addition and destruction. + */ + const struct drm_dp_mst_topology_cbs *cbs; + /** + * @max_dpcd_transaction_bytes: maximum number of bytes to read/write + * in one go. + */ + int max_dpcd_transaction_bytes; + /** + * @aux: AUX channel for the DP MST connector this topolgy mgr is + * controlling. + */ + struct drm_dp_aux *aux; + /** + * @max_payloads: maximum number of payloads the GPU can generate. + */ + int max_payloads; + /** + * @conn_base_id: DRM connector ID this mgr is connected to. Only used + * to build the MST connector path value. + */ + int conn_base_id; + + /** + * @up_req_recv: Message receiver state for up requests. + */ + struct drm_dp_sideband_msg_rx up_req_recv; + + /** + * @down_rep_recv: Message receiver state for replies to down + * requests. + */ + struct drm_dp_sideband_msg_rx down_rep_recv; + + /** + * @lock: protects @mst_state, @mst_primary, @dpcd, and + * @payload_id_table_cleared. + */ + struct mutex lock; + + /** + * @probe_lock: Prevents @work and @up_req_work, the only writers of + * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing + * while they update the topology. + */ + struct mutex probe_lock; + + /** + * @mst_state: If this manager is enabled for an MST capable port. False + * if no MST sink/branch devices is connected. + */ + bool mst_state : 1; + + /** + * @payload_id_table_cleared: Whether or not we've cleared the payload + * ID table for @mst_primary. Protected by @lock. + */ + bool payload_id_table_cleared : 1; + + /** + * @payload_count: The number of currently active payloads in hardware. This value is only + * intended to be used internally by MST helpers for payload tracking, and is only safe to + * read/write from the atomic commit (not check) context. + */ + u8 payload_count; + + /** + * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used + * internally by MST helpers for payload tracking, and is only safe to read/write from the + * atomic commit (not check) context. + */ + u8 next_start_slot; + + /** + * @mst_primary: Pointer to the primary/first branch device. + */ + struct drm_dp_mst_branch *mst_primary; + + /** + * @dpcd: Cache of DPCD for primary port. 
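A sketch of how the pbn_div field documented above might be filled from the root connector's atomic check, using drm_atomic_get_mst_topology_state(), drm_dp_get_vc_payload_bw() and drm_dp_mst_root_conn_atomic_check(), all declared later in this header. link_rate/lane_count come from the driver's negotiated link configuration; the wrapper name is illustrative.

static int foo_mst_root_conn_check(struct drm_connector_state *new_conn_state,
                                   struct drm_dp_mst_topology_mgr *mgr,
                                   int link_rate, int lane_count)
{
        struct drm_dp_mst_topology_state *mst_state;

        mst_state = drm_atomic_get_mst_topology_state(new_conn_state->state, mgr);
        if (IS_ERR(mst_state))
                return PTR_ERR(mst_state);

        /* The driver is expected to fill pbn_div itself (see above). */
        mst_state->pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate, lane_count);

        return drm_dp_mst_root_conn_atomic_check(new_conn_state, mgr);
}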
+ */ + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + /** + * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0. + */ + u8 sink_count; + + /** + * @funcs: Atomic helper callbacks + */ + const struct drm_private_state_funcs *funcs; + + /** + * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state + */ + struct mutex qlock; + + /** + * @tx_msg_downq: List of pending down requests + */ + struct list_head tx_msg_downq; + + /** + * @tx_waitq: Wait to queue stall for the tx worker. + */ + wait_queue_head_t tx_waitq; + /** + * @work: Probe work. + */ + struct work_struct work; + /** + * @tx_work: Sideband transmit worker. This can nest within the main + * @work worker for each transaction @work launches. + */ + struct work_struct tx_work; + + /** + * @destroy_port_list: List of to be destroyed connectors. + */ + struct list_head destroy_port_list; + /** + * @destroy_branch_device_list: List of to be destroyed branch + * devices. + */ + struct list_head destroy_branch_device_list; + /** + * @delayed_destroy_lock: Protects @destroy_port_list and + * @destroy_branch_device_list. + */ + struct mutex delayed_destroy_lock; + + /** + * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items. + * A dedicated WQ makes it possible to drain any requeued work items + * on it. + */ + struct workqueue_struct *delayed_destroy_wq; + + /** + * @delayed_destroy_work: Work item to destroy MST port and branch + * devices, needed to avoid locking inversion. + */ + struct work_struct delayed_destroy_work; + + /** + * @up_req_list: List of pending up requests from the topology that + * need to be processed, in chronological order. + */ + struct list_head up_req_list; + /** + * @up_req_lock: Protects @up_req_list + */ + struct mutex up_req_lock; + /** + * @up_req_work: Work item to process up requests received from the + * topology. Needed to avoid blocking hotplug handling and sideband + * transmissions. + */ + struct work_struct up_req_work; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history_lock: protects + * &drm_dp_mst_port.topology_ref_history and + * &drm_dp_mst_branch.topology_ref_history. 
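A sketch of bringing the topology manager above into service; the init and enable entry points are declared immediately below. The DPCD transaction size, payload count and connector base id are illustrative values, not recommendations.

static int foo_dp_mst_init(struct drm_dp_mst_topology_mgr *mgr,
                           struct drm_device *dev, struct drm_dp_aux *aux,
                           int conn_base_id)
{
        /* 16-byte DPCD transactions and up to 8 payloads are arbitrary here. */
        return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 8, conn_base_id);
}

/* Later, e.g. from the HPD/detect path, once a sink has been probed: */
static int foo_dp_mst_enable(struct drm_dp_mst_topology_mgr *mgr,
                             struct drm_dp_aux *aux,
                             const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
        if (!drm_dp_read_mst_cap(aux, dpcd))
                return -ENODEV;

        return drm_dp_mst_topology_mgr_set_mst(mgr, true);
}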
+ */ + struct mutex topology_ref_history_lock; +#endif +}; + +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, + struct drm_device *dev, struct drm_dp_aux *aux, + int max_dpcd_transaction_bytes, + int max_payloads, int conn_base_id); + +void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); + +bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); + +int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, + const u8 *esi, + u8 *ack, + bool *handled); +void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr); + +int +drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + +int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count); + +int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); + +void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap); + +int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload); +int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_atomic_state *state, + struct drm_dp_mst_atomic_payload *payload); +void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + const struct drm_dp_mst_atomic_payload *old_payload, + struct drm_dp_mst_atomic_payload *new_payload); + +int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_dump_topology(struct seq_file *m, + struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, + bool sync); + +ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size); +ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size); + +int drm_dp_mst_connector_late_register(struct drm_connector *connector, + struct drm_dp_mst_port *port); +void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, + struct drm_dp_mst_port *port); + +struct drm_dp_mst_topology_state * +drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_topology_state * +drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_topology_state * +drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_atomic_payload * +drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state, + struct drm_dp_mst_port *port); +int __must_check +drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn); +int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, + struct drm_dp_mst_port *port, + int pbn, bool enable); +int __must_check +drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state 
*state, + struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_atomic_release_time_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state); +int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state); +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, bool power_up); +int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_query_stream_enc_status_ack_reply *status); +int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); +int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state, + struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); +void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); + +struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port); + +static inline struct drm_dp_mst_topology_state * +to_drm_dp_mst_topology_state(struct drm_private_state *state) +{ + return container_of(state, struct drm_dp_mst_topology_state, base); +} + +extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; + +/** + * __drm_dp_mst_state_iter_get - private atomic state iterator function for + * macro-internal use + * @state: &struct drm_atomic_state pointer + * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state + * iteration cursor + * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state + * iteration cursor + * @i: int iteration cursor, for macro-internal use + * + * Used by for_each_oldnew_mst_mgr_in_state(), + * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't + * call this directly. + * + * Returns: + * True if the current &struct drm_private_obj is a &struct + * drm_dp_mst_topology_mgr, false otherwise. + */ +static inline bool +__drm_dp_mst_state_iter_get(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr **mgr, + struct drm_dp_mst_topology_state **old_state, + struct drm_dp_mst_topology_state **new_state, + int i) +{ + struct __drm_private_objs_state *objs_state = &state->private_objs[i]; + + if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs) + return false; + + *mgr = to_dp_mst_topology_mgr(objs_state->ptr); + if (old_state) + *old_state = to_dp_mst_topology_state(objs_state->old_state); + if (new_state) + *new_state = to_dp_mst_topology_state(objs_state->new_state); + + return true; +} + +/** + * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology + * managers in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old + * state + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking both old and new state. This is useful in places where the state + * delta needs to be considered, for example in atomic check functions. 
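A sketch of the atomic-check side of payload management with the helpers above: convert the mode's bandwidth to PBN with drm_dp_calc_pbn_mode() and reserve time slots on the port via drm_dp_atomic_find_time_slots(). The clock/bpp parameters and the surrounding encoder wiring are illustrative.

static int foo_mst_encoder_atomic_check(struct drm_atomic_state *state,
                                        struct drm_dp_mst_topology_mgr *mgr,
                                        struct drm_dp_mst_port *port,
                                        int crtc_clock_khz, int bpp)
{
        int pbn, slots;

        /* No DSC in this example, hence the 'false'. */
        pbn = drm_dp_calc_pbn_mode(crtc_clock_khz, bpp, false);

        slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
        if (slots < 0)
                return slots;

        /* 'slots' would now be programmed into the stream configuration. */
        return 0;
}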
+ */ +#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i))) + +/** + * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers + * in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking only the old state. This is useful in disable functions, where we + * need the old state the hardware is still in. + */ +#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i))) + +/** + * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers + * in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking only the new state. This is useful in enable functions, where we + * need the new state the hardware should be in when the atomic commit + * operation has completed. + */ +#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i))) + +#endif diff --git a/include/drm/display/drm_dsc.h b/include/drm/display/drm_dsc.h new file mode 100644 index 000000000..bc90273d0 --- /dev/null +++ b/include/drm/display/drm_dsc.h @@ -0,0 +1,605 @@ +/* SPDX-License-Identifier: MIT + * Copyright (C) 2018 Intel Corp. 
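A sketch of the iterator above in a driver's global atomic check, applying a placeholder driver-specific constraint to every MST topology touched by the update before handing bandwidth validation to drm_dp_mst_atomic_check(); the function name and the constraint are illustrative.

static int foo_atomic_check_mst(struct drm_atomic_state *state)
{
        struct drm_dp_mst_topology_mgr *mgr;
        struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
        int i;

        for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
                                         new_mst_state, i) {
                if (old_mst_state->pbn_div != new_mst_state->pbn_div) {
                        /* Link configuration changed; driver reacts here. */
                }
        }

        /* Let the helpers validate bandwidth across all topologies. */
        return drm_dp_mst_atomic_check(state);
}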
+ * + * Authors: + * Manasi Navare <manasi.d.navare@intel.com> + */ + +#ifndef DRM_DSC_H_ +#define DRM_DSC_H_ + +#include <drm/display/drm_dp.h> + +/* VESA Display Stream Compression DSC 1.2 constants */ +#define DSC_NUM_BUF_RANGES 15 +#define DSC_MUX_WORD_SIZE_8_10_BPC 48 +#define DSC_MUX_WORD_SIZE_12_BPC 64 +#define DSC_RC_PIXELS_PER_GROUP 3 +#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095 +#define DSC_RANGE_BPG_OFFSET_MASK 0x3f + +/* DSC Rate Control Constants */ +#define DSC_RC_MODEL_SIZE_CONST 8192 +#define DSC_RC_EDGE_FACTOR_CONST 6 +#define DSC_RC_TGT_OFFSET_HI_CONST 3 +#define DSC_RC_TGT_OFFSET_LO_CONST 3 + +/* DSC PPS constants and macros */ +#define DSC_PPS_VERSION_MAJOR_SHIFT 4 +#define DSC_PPS_BPC_SHIFT 4 +#define DSC_PPS_MSB_SHIFT 8 +#define DSC_PPS_LSB_MASK (0xFF << 0) +#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8) +#define DSC_PPS_VBR_EN_SHIFT 2 +#define DSC_PPS_SIMPLE422_SHIFT 3 +#define DSC_PPS_CONVERT_RGB_SHIFT 4 +#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5 +#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8) +#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8) +#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4 +#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11 +#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6 +#define DSC_PPS_NATIVE_420_SHIFT 1 +#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16 +#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0 +#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13 + +/** + * struct drm_dsc_rc_range_parameters - DSC Rate Control range parameters + * + * This defines different rate control parameters used by the DSC engine + * to compress the frame. + */ +struct drm_dsc_rc_range_parameters { + /** + * @range_min_qp: Min Quantization Parameters allowed for this range + */ + u8 range_min_qp; + /** + * @range_max_qp: Max Quantization Parameters allowed for this range + */ + u8 range_max_qp; + /** + * @range_bpg_offset: + * Bits/group offset to apply to target for this group + */ + u8 range_bpg_offset; +}; + +/** + * struct drm_dsc_config - Parameters required to configure DSC + * + * Driver populates this structure with all the parameters required + * to configure the display stream compression on the source. 
+ */ +struct drm_dsc_config { + /** + * @line_buf_depth: + * Bits per component for previous reconstructed line buffer + */ + u8 line_buf_depth; + /** + * @bits_per_component: Bits per component to code (8/10/12) + */ + u8 bits_per_component; + /** + * @convert_rgb: + * Flag to indicate if RGB - YCoCg conversion is needed + * True if RGB input, False if YCoCg input + */ + bool convert_rgb; + /** + * @slice_count: Number fo slices per line used by the DSC encoder + */ + u8 slice_count; + /** + * @slice_width: Width of each slice in pixels + */ + u16 slice_width; + /** + * @slice_height: Slice height in pixels + */ + u16 slice_height; + /** + * @simple_422: True if simple 4_2_2 mode is enabled else False + */ + bool simple_422; + /** + * @pic_width: Width of the input display frame in pixels + */ + u16 pic_width; + /** + * @pic_height: Vertical height of the input display frame + */ + u16 pic_height; + /** + * @rc_tgt_offset_high: + * Offset to bits/group used by RC to determine QP adjustment + */ + u8 rc_tgt_offset_high; + /** + * @rc_tgt_offset_low: + * Offset to bits/group used by RC to determine QP adjustment + */ + u8 rc_tgt_offset_low; + /** + * @bits_per_pixel: + * Target bits per pixel with 4 fractional bits, bits_per_pixel << 4 + */ + u16 bits_per_pixel; + /** + * @rc_edge_factor: + * Factor to determine if an edge is present based on the bits produced + */ + u8 rc_edge_factor; + /** + * @rc_quant_incr_limit1: + * Slow down incrementing once the range reaches this value + */ + u8 rc_quant_incr_limit1; + /** + * @rc_quant_incr_limit0: + * Slow down incrementing once the range reaches this value + */ + u8 rc_quant_incr_limit0; + /** + * @initial_xmit_delay: + * Number of pixels to delay the initial transmission + */ + u16 initial_xmit_delay; + /** + * @initial_dec_delay: + * Initial decoder delay, number of pixel times that the decoder + * accumulates data in its rate buffer before starting to decode + * and output pixels. + */ + u16 initial_dec_delay; + /** + * @block_pred_enable: + * True if block prediction is used to code any groups within the + * picture. False if BP not used + */ + bool block_pred_enable; + /** + * @first_line_bpg_offset: + * Number of additional bits allocated for each group on the first + * line of slice. + */ + u8 first_line_bpg_offset; + /** + * @initial_offset: Value to use for RC model offset at slice start + */ + u16 initial_offset; + /** + * @rc_buf_thresh: Thresholds defining each of the buffer ranges + */ + u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1]; + /** + * @rc_range_params: + * Parameters for each of the RC ranges defined in + * &struct drm_dsc_rc_range_parameters + */ + struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES]; + /** + * @rc_model_size: Total size of RC model + */ + u16 rc_model_size; + /** + * @flatness_min_qp: Minimum QP where flatness information is sent + */ + u8 flatness_min_qp; + /** + * @flatness_max_qp: Maximum QP where flatness information is sent + */ + u8 flatness_max_qp; + /** + * @initial_scale_value: Initial value for the scale factor + */ + u8 initial_scale_value; + /** + * @scale_decrement_interval: + * Specifies number of group times between decrementing the scale factor + * at beginning of a slice. + */ + u16 scale_decrement_interval; + /** + * @scale_increment_interval: + * Number of group times between incrementing the scale factor value + * used at the beginning of a slice. 
+ */ + u16 scale_increment_interval; + /** + * @nfl_bpg_offset: Non first line BPG offset to be used + */ + u16 nfl_bpg_offset; + /** + * @slice_bpg_offset: BPG offset used to enforce slice bit + */ + u16 slice_bpg_offset; + /** + * @final_offset: Final RC linear transformation offset value + */ + u16 final_offset; + /** + * @vbr_enable: True if VBR mode is enabled, false if disabled + */ + bool vbr_enable; + /** + * @mux_word_size: Mux word size (in bits) for SSM mode + */ + u8 mux_word_size; + /** + * @slice_chunk_size: + * The (max) size in bytes of the "chunks" that are used in slice + * multiplexing. + */ + u16 slice_chunk_size; + /** + * @rc_bits: Rate control buffer size in bits + */ + u16 rc_bits; + /** + * @dsc_version_minor: DSC minor version + */ + u8 dsc_version_minor; + /** + * @dsc_version_major: DSC major version + */ + u8 dsc_version_major; + /** + * @native_422: True if Native 4:2:2 supported, else false + */ + bool native_422; + /** + * @native_420: True if Native 4:2:0 supported else false. + */ + bool native_420; + /** + * @second_line_bpg_offset: + * Additional bits/grp for seconnd line of slice for native 4:2:0 + */ + u8 second_line_bpg_offset; + /** + * @nsl_bpg_offset: + * Num of bits deallocated for each grp that is not in second line of + * slice + */ + u16 nsl_bpg_offset; + /** + * @second_line_offset_adj: + * Offset adjustment for second line in Native 4:2:0 mode + */ + u16 second_line_offset_adj; +}; + +/** + * struct drm_dsc_picture_parameter_set - Represents 128 bytes of + * Picture Parameter Set + * + * The VESA DSC standard defines picture parameter set (PPS) which display + * stream compression encoders must communicate to decoders. + * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in + * this structure are as per Table 4.1 in Vesa DSC specification v1.1/v1.2. + * The PPS fields that span over more than a byte should be stored in Big Endian + * format. + */ +struct drm_dsc_picture_parameter_set { + /** + * @dsc_version: + * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC + * PPS0[7:4] - dsc_version_major: Contains major version of DSC + */ + u8 dsc_version; + /** + * @pps_identifier: + * PPS1[7:0] - Application specific identifier that can be + * used to differentiate between different PPS tables. + */ + u8 pps_identifier; + /** + * @pps_reserved: + * PPS2[7:0]- RESERVED Byte + */ + u8 pps_reserved; + /** + * @pps_3: + * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to + * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits, + * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits, + * 0xE - 14 bits for DSC1.2, 0xF - 14 bits for DSC 1.2. + * PPS3[7:4] - bits_per_component: Bits per component for the original + * pixels of the encoded picture. + * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2) + * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also + * allowed only when dsc_minor_version = 0x2) + */ + u8 pps_3; + /** + * @pps_4: + * PPS4[1:0] -These are the most significant 2 bits of + * compressed BPP bits_per_pixel[9:0] syntax element. + * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled + * PPS4[3] - simple_422: Indicates if decoder drops samples to + * reconstruct the 4:2:2 picture. + * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is + * active. 
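A sketch of the encoder-side flow implied by these two structures: the driver fills a struct drm_dsc_config (shown here with a handful of representative fields, not a validated configuration) and then serializes it into the 128 big-endian PPS bytes. drm_dsc_pps_payload_pack() is declared in drm/display/drm_dsc_helper.h, added further down in this series.

#include <drm/display/drm_dsc.h>
#include <drm/display/drm_dsc_helper.h>

static void foo_dsc_pack_pps(struct drm_dsc_picture_parameter_set *pps,
                             struct drm_dsc_config *cfg)
{
        cfg->dsc_version_major = 1;
        cfg->dsc_version_minor = 2;
        cfg->bits_per_component = 8;
        cfg->bits_per_pixel = 8 << 4;   /* 8 bpp with 4 fractional bits */
        cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
        cfg->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
        cfg->rc_tgt_offset_high = DSC_RC_TGT_OFFSET_HI_CONST;
        cfg->rc_tgt_offset_low = DSC_RC_TGT_OFFSET_LO_CONST;
        cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;

        /* Fields map onto PPS0..PPS127, big-endian where multi-byte. */
        drm_dsc_pps_payload_pack(pps, cfg);
}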
+ * PPS4[5] - blobk_pred_enable: Indicates if BP is used to code any + * groups in picture + * PPS4[7:6] - Reseved bits + */ + u8 pps_4; + /** + * @bits_per_pixel_low: + * PPS5[7:0] - This indicates the lower significant 8 bits of + * the compressed BPP bits_per_pixel[9:0] element. + */ + u8 bits_per_pixel_low; + /** + * @pic_height: + * PPS6[7:0], PPS7[7:0] -pic_height: Specifies the number of pixel rows + * within the raster. + */ + __be16 pic_height; + /** + * @pic_width: + * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within + * the raster. + */ + __be16 pic_width; + /** + * @slice_height: + * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels. + */ + __be16 slice_height; + /** + * @slice_width: + * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels. + */ + __be16 slice_width; + /** + * @chunk_size: + * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks + * that are used for slice multiplexing. + */ + __be16 chunk_size; + /** + * @initial_xmit_delay_high: + * PPS16[1:0] - Most Significant two bits of initial transmission delay. + * It specifies the number of pixel times that the encoder waits before + * transmitting data from its rate buffer. + * PPS16[7:2] - Reserved + */ + u8 initial_xmit_delay_high; + /** + * @initial_xmit_delay_low: + * PPS17[7:0] - Least significant 8 bits of initial transmission delay. + */ + u8 initial_xmit_delay_low; + /** + * @initial_dec_delay: + * + * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number + * of pixel times that the decoder accumulates data in its rate buffer + * before starting to decode and output pixels. + */ + __be16 initial_dec_delay; + /** + * @pps20_reserved: + * + * PPS20[7:0] - Reserved + */ + u8 pps20_reserved; + /** + * @initial_scale_value: + * PPS21[5:0] - Initial rcXformScale factor used at beginning + * of a slice. + * PPS21[7:6] - Reserved + */ + u8 initial_scale_value; + /** + * @scale_increment_interval: + * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing + * the rcXformScale factor at end of a slice. + */ + __be16 scale_increment_interval; + /** + * @scale_decrement_interval_high: + * PPS24[3:0] - Higher 4 bits indicating number of group times between + * decrementing the rcXformScale factor at beginning of a slice. + * PPS24[7:4] - Reserved + */ + u8 scale_decrement_interval_high; + /** + * @scale_decrement_interval_low: + * PPS25[7:0] - Lower 8 bits of scale decrement interval + */ + u8 scale_decrement_interval_low; + /** + * @pps26_reserved: + * PPS26[7:0] + */ + u8 pps26_reserved; + /** + * @first_line_bpg_offset: + * PPS27[4:0] - Number of additional bits that are allocated + * for each group on first line of a slice. + * PPS27[7:5] - Reserved + */ + u8 first_line_bpg_offset; + /** + * @nfl_bpg_offset: + * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits + * deallocated for each group for groups after the first line of slice. + */ + __be16 nfl_bpg_offset; + /** + * @slice_bpg_offset: + * PPS30, PPS31[7:0] - Number of bits that are deallocated for each + * group to enforce the slice constraint. + */ + __be16 slice_bpg_offset; + /** + * @initial_offset: + * PPS32,33[7:0] - Initial value for rcXformOffset + */ + __be16 initial_offset; + /** + * @final_offset: + * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset + */ + __be16 final_offset; + /** + * @flatness_min_qp: + * PPS36[4:0] - Minimum QP at which flatness is signaled and + * flatness QP adjustment is made. 
+ * PPS36[7:5] - Reserved + */ + u8 flatness_min_qp; + /** + * @flatness_max_qp: + * PPS37[4:0] - Max QP at which flatness is signalled and + * the flatness adjustment is made. + * PPS37[7:5] - Reserved + */ + u8 flatness_max_qp; + /** + * @rc_model_size: + * PPS38,39[7:0] - Number of bits within RC Model. + */ + __be16 rc_model_size; + /** + * @rc_edge_factor: + * PPS40[3:0] - Ratio of current activity vs, previous + * activity to determine presence of edge. + * PPS40[7:4] - Reserved + */ + u8 rc_edge_factor; + /** + * @rc_quant_incr_limit0: + * PPS41[4:0] - QP threshold used in short term RC + * PPS41[7:5] - Reserved + */ + u8 rc_quant_incr_limit0; + /** + * @rc_quant_incr_limit1: + * PPS42[4:0] - QP threshold used in short term RC + * PPS42[7:5] - Reserved + */ + u8 rc_quant_incr_limit1; + /** + * @rc_tgt_offset: + * PPS43[3:0] - Lower end of the variability range around the target + * bits per group that is allowed by short term RC. + * PPS43[7:4]- Upper end of the variability range around the target + * bits per group that i allowed by short term rc. + */ + u8 rc_tgt_offset; + /** + * @rc_buf_thresh: + * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for + * the 15 ranges defined by 14 thresholds. + */ + u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1]; + /** + * @rc_range_parameters: + * PPS58[7:0] - PPS87[7:0] + * Parameters that correspond to each of the 15 ranges. + */ + __be16 rc_range_parameters[DSC_NUM_BUF_RANGES]; + /** + * @native_422_420: + * PPS88[0] - 0 = Native 4:2:2 not used + * 1 = Native 4:2:2 used + * PPS88[1] - 0 = Native 4:2:0 not use + * 1 = Native 4:2:0 used + * PPS88[7:2] - Reserved 6 bits + */ + u8 native_422_420; + /** + * @second_line_bpg_offset: + * PPS89[4:0] - Additional bits/group budget for the + * second line of a slice in Native 4:2:0 mode. + * Set to 0 if DSC minor version is 1 or native420 is 0. + * PPS89[7:5] - Reserved + */ + u8 second_line_bpg_offset; + /** + * @nsl_bpg_offset: + * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated + * for each group that is not in the second line of a slice. + */ + __be16 nsl_bpg_offset; + /** + * @second_line_offset_adj: + * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second + * line in Native 4:2:0 mode. + */ + __be16 second_line_offset_adj; + /** + * @pps_long_94_reserved: + * PPS 94, 95, 96, 97 - Reserved + */ + u32 pps_long_94_reserved; + /** + * @pps_long_98_reserved: + * PPS 98, 99, 100, 101 - Reserved + */ + u32 pps_long_98_reserved; + /** + * @pps_long_102_reserved: + * PPS 102, 103, 104, 105 - Reserved + */ + u32 pps_long_102_reserved; + /** + * @pps_long_106_reserved: + * PPS 106, 107, 108, 109 - reserved + */ + u32 pps_long_106_reserved; + /** + * @pps_long_110_reserved: + * PPS 110, 111, 112, 113 - reserved + */ + u32 pps_long_110_reserved; + /** + * @pps_long_114_reserved: + * PPS 114 - 117 - reserved + */ + u32 pps_long_114_reserved; + /** + * @pps_long_118_reserved: + * PPS 118 - 121 - reserved + */ + u32 pps_long_118_reserved; + /** + * @pps_long_122_reserved: + * PPS 122- 125 - reserved + */ + u32 pps_long_122_reserved; + /** + * @pps_short_126_reserved: + * PPS 126, 127 - reserved + */ + __be16 pps_short_126_reserved; +} __packed; + +/** + * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter + * Set Metadata + * + * This structure represents the DSC PPS infoframe required to send the Picture + * Parameter Set metadata required before enabling VESA Display Stream + * Compression. 
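+ *
+ * As a minimal sketch (vdsc_cfg is a hypothetical, already computed
+ * &struct drm_dsc_config), a DP encoder driver could fill this infoframe with
+ * the helpers declared in drm_dsc_helper.h::
+ *
+ *     struct drm_dsc_pps_infoframe pps_sdp;
+ *
+ *     drm_dsc_dp_pps_header_init(&pps_sdp.pps_header);
+ *     drm_dsc_pps_payload_pack(&pps_sdp.pps_payload, &vdsc_cfg);
+ *
+ * and then write the packed SDP out with driver-specific code. The layout of
+ * the infoframe is as follows.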
This is based on the DP Secondary Data Packet structure and + * comprises of SDP Header as defined &struct dp_sdp_header in drm_dp_helper.h + * and PPS payload defined in &struct drm_dsc_picture_parameter_set. + * + * @pps_header: Header for PPS as per DP SDP header format of type + * &struct dp_sdp_header + * @pps_payload: PPS payload fields as per DSC specification Table 4-1 + * as represented in &struct drm_dsc_picture_parameter_set + */ +struct drm_dsc_pps_infoframe { + struct dp_sdp_header pps_header; + struct drm_dsc_picture_parameter_set pps_payload; +} __packed; + +#endif /* _DRM_DSC_H_ */ diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h new file mode 100644 index 000000000..8b41edbba --- /dev/null +++ b/include/drm/display/drm_dsc_helper.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT + * Copyright (C) 2018 Intel Corp. + * + * Authors: + * Manasi Navare <manasi.d.navare@intel.com> + */ + +#ifndef DRM_DSC_HELPER_H_ +#define DRM_DSC_HELPER_H_ + +#include <drm/display/drm_dsc.h> + +void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header); +int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size); +void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp, + const struct drm_dsc_config *dsc_cfg); +int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg); + +#endif /* _DRM_DSC_HELPER_H_ */ + diff --git a/include/drm/display/drm_hdcp.h b/include/drm/display/drm_hdcp.h new file mode 100644 index 000000000..96a99b137 --- /dev/null +++ b/include/drm/display/drm_hdcp.h @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2017 Google, Inc. + * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ + +#ifndef _DRM_HDCP_H_ +#define _DRM_HDCP_H_ + +#include <linux/types.h> + +/* Period of hdcp checks (to ensure we're still authenticated) */ +#define DRM_HDCP_CHECK_PERIOD_MS (128 * 16) +#define DRM_HDCP2_CHECK_PERIOD_MS 500 + +/* Shared lengths/masks between HDMI/DVI/DisplayPort */ +#define DRM_HDCP_AN_LEN 8 +#define DRM_HDCP_BSTATUS_LEN 2 +#define DRM_HDCP_KSV_LEN 5 +#define DRM_HDCP_RI_LEN 2 +#define DRM_HDCP_V_PRIME_PART_LEN 4 +#define DRM_HDCP_V_PRIME_NUM_PARTS 5 +#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f) +#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) +#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) + +/* Slave address for the HDCP registers in the receiver */ +#define DRM_HDCP_DDC_ADDR 0x3A + +/* Value to use at the end of the SHA-1 bytestream used for repeaters */ +#define DRM_HDCP_SHA1_TERMINATOR 0x80 + +/* HDCP register offsets for HDMI/DVI devices */ +#define DRM_HDCP_DDC_BKSV 0x00 +#define DRM_HDCP_DDC_RI_PRIME 0x08 +#define DRM_HDCP_DDC_AKSV 0x10 +#define DRM_HDCP_DDC_AN 0x18 +#define DRM_HDCP_DDC_V_PRIME(h) (0x20 + h * 4) +#define DRM_HDCP_DDC_BCAPS 0x40 +#define DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT BIT(6) +#define DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY BIT(5) +#define DRM_HDCP_DDC_BSTATUS 0x41 +#define DRM_HDCP_DDC_KSV_FIFO 0x43 + +#define DRM_HDCP_1_4_SRM_ID 0x8 +#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3 +#define DRM_HDCP_1_4_DCP_SIG_SIZE 40 + +/* Protocol message definition for HDCP2.2 specification */ +/* + * Protected content streams are classified into 2 types: + * - Type0: Can be transmitted with HDCP 1.4+ + * - Type1: Can be transmitted with HDCP 2.2+ + */ +#define HDCP_STREAM_TYPE0 0x00 +#define HDCP_STREAM_TYPE1 0x01 + +/* HDCP2.2 Msg IDs */ +#define HDCP_2_2_NULL_MSG 1 +#define HDCP_2_2_AKE_INIT 2 +#define HDCP_2_2_AKE_SEND_CERT 3 +#define 
HDCP_2_2_AKE_NO_STORED_KM 4 +#define HDCP_2_2_AKE_STORED_KM 5 +#define HDCP_2_2_AKE_SEND_HPRIME 7 +#define HDCP_2_2_AKE_SEND_PAIRING_INFO 8 +#define HDCP_2_2_LC_INIT 9 +#define HDCP_2_2_LC_SEND_LPRIME 10 +#define HDCP_2_2_SKE_SEND_EKS 11 +#define HDCP_2_2_REP_SEND_RECVID_LIST 12 +#define HDCP_2_2_REP_SEND_ACK 15 +#define HDCP_2_2_REP_STREAM_MANAGE 16 +#define HDCP_2_2_REP_STREAM_READY 17 + +#define HDCP_2_2_RTX_LEN 8 +#define HDCP_2_2_RRX_LEN 8 + +#define HDCP_2_2_K_PUB_RX_MOD_N_LEN 128 +#define HDCP_2_2_K_PUB_RX_EXP_E_LEN 3 +#define HDCP_2_2_K_PUB_RX_LEN (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \ + HDCP_2_2_K_PUB_RX_EXP_E_LEN) + +#define HDCP_2_2_DCP_LLC_SIG_LEN 384 + +#define HDCP_2_2_E_KPUB_KM_LEN 128 +#define HDCP_2_2_E_KH_KM_M_LEN (16 + 16) +#define HDCP_2_2_H_PRIME_LEN 32 +#define HDCP_2_2_E_KH_KM_LEN 16 +#define HDCP_2_2_RN_LEN 8 +#define HDCP_2_2_L_PRIME_LEN 32 +#define HDCP_2_2_E_DKEY_KS_LEN 16 +#define HDCP_2_2_RIV_LEN 8 +#define HDCP_2_2_SEQ_NUM_LEN 3 +#define HDCP_2_2_V_PRIME_HALF_LEN (HDCP_2_2_L_PRIME_LEN / 2) +#define HDCP_2_2_RECEIVER_ID_LEN DRM_HDCP_KSV_LEN +#define HDCP_2_2_MAX_DEVICE_COUNT 31 +#define HDCP_2_2_RECEIVER_IDS_MAX_LEN (HDCP_2_2_RECEIVER_ID_LEN * \ + HDCP_2_2_MAX_DEVICE_COUNT) +#define HDCP_2_2_MPRIME_LEN 32 + +/* Following Macros take a byte at a time for bit(s) masking */ +/* + * TODO: HDCP_2_2_MAX_CONTENT_STREAMS_CNT is based upon actual + * H/W MST streams capacity. + * This required to be moved out to platform specific header. + */ +#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 4 +#define HDCP_2_2_TXCAP_MASK_LEN 2 +#define HDCP_2_2_RXCAPS_LEN 3 +#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0)) +#define HDCP_2_2_DP_HDCP_CAPABLE(x) ((x) & BIT(1)) +#define HDCP_2_2_RXINFO_LEN 2 + +/* HDCP1.x compliant device in downstream */ +#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x) ((x) & BIT(0)) + +/* HDCP2.0 Compliant repeater in downstream */ +#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x) ((x) & BIT(1)) +#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x) ((x) & BIT(2)) +#define HDCP_2_2_MAX_DEVS_EXCEEDED(x) ((x) & BIT(3)) +#define HDCP_2_2_DEV_COUNT_LO(x) (((x) & (0xF << 4)) >> 4) +#define HDCP_2_2_DEV_COUNT_HI(x) ((x) & BIT(0)) +#define HDCP_2_2_DEPTH(x) (((x) & (0x7 << 1)) >> 1) + +struct hdcp2_cert_rx { + u8 receiver_id[HDCP_2_2_RECEIVER_ID_LEN]; + u8 kpub_rx[HDCP_2_2_K_PUB_RX_LEN]; + u8 reserved[2]; + u8 dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN]; +} __packed; + +struct hdcp2_streamid_type { + u8 stream_id; + u8 stream_type; +} __packed; + +/* + * The TxCaps field specified in the HDCP HDMI, DP specs + * This field is big endian as specified in the errata. + */ +struct hdcp2_tx_caps { + /* Transmitter must set this to 0x2 */ + u8 version; + + /* Reserved for HDCP and DP Spec. 
Read as Zero */ + u8 tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN]; +} __packed; + +/* Main structures for HDCP2.2 protocol communication */ +struct hdcp2_ake_init { + u8 msg_id; + u8 r_tx[HDCP_2_2_RTX_LEN]; + struct hdcp2_tx_caps tx_caps; +} __packed; + +struct hdcp2_ake_send_cert { + u8 msg_id; + struct hdcp2_cert_rx cert_rx; + u8 r_rx[HDCP_2_2_RRX_LEN]; + u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; +} __packed; + +struct hdcp2_ake_no_stored_km { + u8 msg_id; + u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; +} __packed; + +struct hdcp2_ake_stored_km { + u8 msg_id; + u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; +} __packed; + +struct hdcp2_ake_send_hprime { + u8 msg_id; + u8 h_prime[HDCP_2_2_H_PRIME_LEN]; +} __packed; + +struct hdcp2_ake_send_pairing_info { + u8 msg_id; + u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; +} __packed; + +struct hdcp2_lc_init { + u8 msg_id; + u8 r_n[HDCP_2_2_RN_LEN]; +} __packed; + +struct hdcp2_lc_send_lprime { + u8 msg_id; + u8 l_prime[HDCP_2_2_L_PRIME_LEN]; +} __packed; + +struct hdcp2_ske_send_eks { + u8 msg_id; + u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; + u8 riv[HDCP_2_2_RIV_LEN]; +} __packed; + +struct hdcp2_rep_send_receiverid_list { + u8 msg_id; + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; + u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; + u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; +} __packed; + +struct hdcp2_rep_send_ack { + u8 msg_id; + u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; +} __packed; + +struct hdcp2_rep_stream_manage { + u8 msg_id; + u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; + __be16 k; + struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT]; +} __packed; + +struct hdcp2_rep_stream_ready { + u8 msg_id; + u8 m_prime[HDCP_2_2_MPRIME_LEN]; +} __packed; + +/* HDCP2.2 TIMEOUTs in mSec */ +#define HDCP_2_2_CERT_TIMEOUT_MS 100 +#define HDCP_2_2_DP_CERT_READ_TIMEOUT_MS 110 +#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000 +#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200 +#define HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS 7 +#define HDCP_2_2_PAIRING_TIMEOUT_MS 200 +#define HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS 5 +#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20 +#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 16 +#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000 +#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100 + +/* HDMI HDCP2.2 Register Offsets */ +#define HDCP_2_2_HDMI_REG_VER_OFFSET 0x50 +#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET 0x60 +#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET 0x70 +#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET 0x80 +#define HDCP_2_2_HDMI_REG_DBG_OFFSET 0xC0 + +#define HDCP_2_2_HDMI_SUPPORT_MASK BIT(2) +#define HDCP_2_2_RX_CAPS_VERSION_VAL 0x02 +#define HDCP_2_2_SEQ_NUM_MAX 0xFFFFFF +#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN 200 + +/* Below macros take a byte at a time and mask the bit(s) */ +#define HDCP_2_2_HDMI_RXSTATUS_LEN 2 +#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3) +#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) +#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) + +/* + * Helper functions to convert 24bit big endian hdcp sequence number to + * host format and back + */ +static inline +u32 drm_hdcp_be24_to_cpu(const u8 seq_num[HDCP_2_2_SEQ_NUM_LEN]) +{ + return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16); +} + +static inline +void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val) +{ + seq_num[0] = val >> 16; + seq_num[1] = val >> 8; + seq_num[2] = val; +} + +#define DRM_HDCP_SRM_GEN1_MAX_BYTES (5 * 1024) +#define DRM_HDCP_1_4_SRM_ID 0x8 +#define DRM_HDCP_SRM_ID_MASK (0xF << 4) +#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3 +#define 
DRM_HDCP_1_4_DCP_SIG_SIZE 40 +#define DRM_HDCP_2_SRM_ID 0x9 +#define DRM_HDCP_2_INDICATOR 0x1 +#define DRM_HDCP_2_INDICATOR_MASK 0xF +#define DRM_HDCP_2_VRL_LENGTH_SIZE 3 +#define DRM_HDCP_2_DCP_SIG_SIZE 384 +#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4 +#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6) + +struct hdcp_srm_header { + u8 srm_id; + u8 reserved; + __be16 srm_version; + u8 srm_gen_no; +} __packed; + +/* Content Type classification for HDCP2.2 vs others */ +#define DRM_MODE_HDCP_CONTENT_TYPE0 0 +#define DRM_MODE_HDCP_CONTENT_TYPE1 1 + +#endif diff --git a/include/drm/display/drm_hdcp_helper.h b/include/drm/display/drm_hdcp_helper.h new file mode 100644 index 000000000..8aaf87bf2 --- /dev/null +++ b/include/drm/display/drm_hdcp_helper.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2017 Google, Inc. + * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ + +#ifndef _DRM_HDCP_HELPER_H_INCLUDED_ +#define _DRM_HDCP_HELPER_H_INCLUDED_ + +#include <drm/display/drm_hdcp.h> + +struct drm_device; +struct drm_connector; + +int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, u8 *ksvs, u32 ksv_count); +int drm_connector_attach_content_protection_property(struct drm_connector *connector, + bool hdcp_content_type); +void drm_hdcp_update_content_protection(struct drm_connector *connector, u64 val); + +#endif diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h new file mode 100644 index 000000000..76d234826 --- /dev/null +++ b/include/drm/display/drm_hdmi_helper.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_HDMI_HELPER +#define DRM_HDMI_HELPER + +#include <linux/hdmi.h> + +struct drm_connector; +struct drm_connector_state; +struct drm_display_mode; + +void +drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +void +drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +int +drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, + const struct drm_connector_state *conn_state); + +void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +#endif diff --git a/include/drm/display/drm_scdc.h b/include/drm/display/drm_scdc.h new file mode 100644 index 000000000..3d58f37e8 --- /dev/null +++ b/include/drm/display/drm_scdc.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2015 NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef DRM_SCDC_H +#define DRM_SCDC_H + +#define SCDC_SINK_VERSION 0x01 + +#define SCDC_SOURCE_VERSION 0x02 + +#define SCDC_UPDATE_0 0x10 +#define SCDC_READ_REQUEST_TEST (1 << 2) +#define SCDC_CED_UPDATE (1 << 1) +#define SCDC_STATUS_UPDATE (1 << 0) + +#define SCDC_UPDATE_1 0x11 + +#define SCDC_TMDS_CONFIG 0x20 +#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 (1 << 1) +#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_10 (0 << 1) +#define SCDC_SCRAMBLING_ENABLE (1 << 0) + +#define SCDC_SCRAMBLER_STATUS 0x21 +#define SCDC_SCRAMBLING_STATUS (1 << 0) + +#define SCDC_CONFIG_0 0x30 +#define SCDC_READ_REQUEST_ENABLE (1 << 0) + +#define SCDC_STATUS_FLAGS_0 0x40 +#define SCDC_CH2_LOCK (1 << 3) +#define SCDC_CH1_LOCK (1 << 2) +#define SCDC_CH0_LOCK (1 << 1) +#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK) +#define SCDC_CLOCK_DETECT (1 << 0) + +#define SCDC_STATUS_FLAGS_1 0x41 + +#define SCDC_ERR_DET_0_L 0x50 +#define SCDC_ERR_DET_0_H 0x51 +#define SCDC_ERR_DET_1_L 0x52 +#define SCDC_ERR_DET_1_H 0x53 +#define SCDC_ERR_DET_2_L 0x54 +#define SCDC_ERR_DET_2_H 0x55 +#define SCDC_CHANNEL_VALID (1 << 7) + +#define SCDC_ERR_DET_CHECKSUM 0x56 + +#define SCDC_TEST_CONFIG_0 0xc0 +#define SCDC_TEST_READ_REQUEST (1 << 7) +#define SCDC_TEST_READ_REQUEST_DELAY(x) ((x) & 0x7f) + +#define SCDC_MANUFACTURER_IEEE_OUI 0xd0 +#define SCDC_MANUFACTURER_IEEE_OUI_SIZE 3 + +#define SCDC_DEVICE_ID 0xd3 +#define SCDC_DEVICE_ID_SIZE 8 + +#define SCDC_DEVICE_HARDWARE_REVISION 0xdb +#define SCDC_GET_DEVICE_HARDWARE_REVISION_MAJOR(x) (((x) >> 4) & 0xf) +#define SCDC_GET_DEVICE_HARDWARE_REVISION_MINOR(x) (((x) >> 0) & 0xf) + +#define SCDC_DEVICE_SOFTWARE_MAJOR_REVISION 0xdc +#define SCDC_DEVICE_SOFTWARE_MINOR_REVISION 0xdd + +#define SCDC_MANUFACTURER_SPECIFIC 0xde +#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34 + +#endif diff --git a/include/drm/display/drm_scdc_helper.h b/include/drm/display/drm_scdc_helper.h new file mode 100644 index 000000000..ded01fd94 --- /dev/null +++ b/include/drm/display/drm_scdc_helper.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015 NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef DRM_SCDC_HELPER_H +#define DRM_SCDC_HELPER_H + +#include <linux/types.h> + +#include <drm/display/drm_scdc.h> + +struct i2c_adapter; + +ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer, + size_t size); +ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset, + const void *buffer, size_t size); + +/** + * drm_scdc_readb - read a single byte from SCDC + * @adapter: I2C adapter + * @offset: offset of register to read + * @value: return location for the register value + * + * Reads a single byte from SCDC. This is a convenience wrapper around the + * drm_scdc_read() function. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset, + u8 *value) +{ + return drm_scdc_read(adapter, offset, value, sizeof(*value)); +} + +/** + * drm_scdc_writeb - write a single byte to SCDC + * @adapter: I2C adapter + * @offset: offset of register to write + * @value: value to write to the register + * + * Writes a single byte to SCDC. This is a convenience wrapper around the + * drm_scdc_write() function. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset, + u8 value) +{ + return drm_scdc_write(adapter, offset, &value, sizeof(value)); +} + +bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter); + +bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable); +bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set); + +#endif diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h new file mode 100644 index 000000000..cbe33b49f --- /dev/null +++ b/include/drm/drm_aperture.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef _DRM_APERTURE_H_ +#define _DRM_APERTURE_H_ + +#include <linux/types.h> + +struct drm_device; +struct drm_driver; +struct pci_dev; + +int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base, + resource_size_t size); + +int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, + const struct drm_driver *req_driver); + +int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, + const struct drm_driver *req_driver); + +/** + * drm_aperture_remove_framebuffers - remove all existing framebuffers + * @req_driver: requesting DRM driver + * + * This function removes all conflicting graphics device drivers. Use this function on systems + * that can have their framebuffer located anywhere in memory. + * + * Returns: + * 0 on success, or a negative errno code otherwise + */ +static inline int +drm_aperture_remove_framebuffers(const struct drm_driver *req_driver) +{ + return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, + req_driver); +} + +#endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h new file mode 100644 index 000000000..10b1990bc --- /dev/null +++ b/include/drm/drm_atomic.h @@ -0,0 +1,1143 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_H_ +#define DRM_ATOMIC_H_ + +#include <drm/drm_crtc.h> +#include <drm/drm_util.h> + +/** + * struct drm_crtc_commit - track modeset commits on a CRTC + * + * This structure is used to track pending modeset changes and atomic commit on + * a per-CRTC basis. Since updating the list should never block, this structure + * is reference counted to allow waiters to safely wait on an event to complete, + * without holding any locks. + * + * It has 3 different events in total to allow a fine-grained synchronization + * between outstanding updates:: + * + * atomic commit thread hardware + * + * write new state into hardware ----> ... + * signal hw_done + * switch to new state on next + * ... v/hblank + * + * wait for buffers to show up ... + * + * ... send completion irq + * irq handler signals flip_done + * cleanup old buffers + * + * signal cleanup_done + * + * wait for flip_done <---- + * clean up atomic state + * + * The important bit to know is that &cleanup_done is the terminal event, but the + * ordering between &flip_done and &hw_done is entirely up to the specific driver + * and modeset state change. + * + * For an implementation of how to use this look at + * drm_atomic_helper_setup_commit() from the atomic helper library. + * + * See also drm_crtc_commit_wait(). + */ +struct drm_crtc_commit { + /** + * @crtc: + * + * DRM CRTC for this commit. + */ + struct drm_crtc *crtc; + + /** + * @ref: + * + * Reference count for this structure. Needed to allow blocking on + * completions without the risk of the completion disappearing + * meanwhile. + */ + struct kref ref; + + /** + * @flip_done: + * + * Will be signaled when the hardware has flipped to the new set of + * buffers. Signals at the same time as when the drm event for this + * commit is sent to userspace, or when an out-fence is singalled. Note + * that for most hardware, in most cases this happens after @hw_done is + * signalled. + * + * Completion of this stage is signalled implicitly by calling + * drm_crtc_send_vblank_event() on &drm_crtc_state.event. + */ + struct completion flip_done; + + /** + * @hw_done: + * + * Will be signalled when all hw register changes for this commit have + * been written out. 
Especially when disabling a pipe this can be much + * later than @flip_done, since that can signal already when the + * screen goes black, whereas to fully shut down a pipe more register + * I/O is required. + * + * Note that this does not need to include separately reference-counted + * resources like backing storage buffer pinning, or runtime pm + * management. + * + * Drivers should call drm_atomic_helper_commit_hw_done() to signal + * completion of this stage. + */ + struct completion hw_done; + + /** + * @cleanup_done: + * + * Will be signalled after old buffers have been cleaned up by calling + * drm_atomic_helper_cleanup_planes(). Since this can only happen after + * a vblank wait completed it might be a bit later. This completion is + * useful to throttle updates and avoid hardware updates getting ahead + * of the buffer cleanup too much. + * + * Drivers should call drm_atomic_helper_commit_cleanup_done() to signal + * completion of this stage. + */ + struct completion cleanup_done; + + /** + * @commit_entry: + * + * Entry on the per-CRTC &drm_crtc.commit_list. Protected by + * $drm_crtc.commit_lock. + */ + struct list_head commit_entry; + + /** + * @event: + * + * &drm_pending_vblank_event pointer to clean up private events. + */ + struct drm_pending_vblank_event *event; + + /** + * @abort_completion: + * + * A flag that's set after drm_atomic_helper_setup_commit() takes a + * second reference for the completion of $drm_crtc_state.event. It's + * used by the free code to remove the second reference if commit fails. + */ + bool abort_completion; +}; + +struct __drm_planes_state { + struct drm_plane *ptr; + struct drm_plane_state *state, *old_state, *new_state; +}; + +struct __drm_crtcs_state { + struct drm_crtc *ptr; + struct drm_crtc_state *state, *old_state, *new_state; + + /** + * @commit: + * + * A reference to the CRTC commit object that is kept for use by + * drm_atomic_helper_wait_for_flip_done() after + * drm_atomic_helper_commit_hw_done() is called. This ensures that a + * concurrent commit won't free a commit object that is still in use. + */ + struct drm_crtc_commit *commit; + + s32 __user *out_fence_ptr; + u64 last_vblank_count; +}; + +struct __drm_connnectors_state { + struct drm_connector *ptr; + struct drm_connector_state *state, *old_state, *new_state; + /** + * @out_fence_ptr: + * + * User-provided pointer which the kernel uses to return a sync_file + * file descriptor. Used by writeback connectors to signal completion of + * the writeback. + */ + s32 __user *out_fence_ptr; +}; + +struct drm_private_obj; +struct drm_private_state; + +/** + * struct drm_private_state_funcs - atomic state functions for private objects + * + * These hooks are used by atomic helpers to create, swap and destroy states of + * private objects. The structure itself is used as a vtable to identify the + * associated private object type. Each private object type that needs to be + * added to the atomic states is expected to have an implementation of these + * hooks and pass a pointer to its drm_private_state_funcs struct to + * drm_atomic_get_private_obj_state(). + */ +struct drm_private_state_funcs { + /** + * @atomic_duplicate_state: + * + * Duplicate the current state of the private object and return it. It + * is an error to call this before obj->state has been initialized. + * + * RETURNS: + * + * Duplicated atomic state or NULL when obj->state is not + * initialized or allocation failed. 
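+ *
+ * A minimal sketch, assuming a hypothetical driver wrapper struct my_state
+ * that embeds &struct drm_private_state as a member named base::
+ *
+ *     static struct drm_private_state *
+ *     my_duplicate_state(struct drm_private_obj *obj)
+ *     {
+ *             struct my_state *old = container_of(obj->state,
+ *                                                 struct my_state, base);
+ *             struct my_state *new = kmemdup(old, sizeof(*old), GFP_KERNEL);
+ *
+ *             return new ? &new->base : NULL;
+ *     }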
+ */ + struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj); + + /** + * @atomic_destroy_state: + * + * Frees the private object state created with @atomic_duplicate_state. + */ + void (*atomic_destroy_state)(struct drm_private_obj *obj, + struct drm_private_state *state); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_private_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_private_obj_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_private_state *state); +}; + +/** + * struct drm_private_obj - base struct for driver private atomic object + * + * A driver private object is initialized by calling + * drm_atomic_private_obj_init() and cleaned up by calling + * drm_atomic_private_obj_fini(). + * + * Currently only tracks the state update functions and the opaque driver + * private state itself, but in the future might also track which + * &drm_modeset_lock is required to duplicate and update this object's state. + * + * All private objects must be initialized before the DRM device they are + * attached to is registered to the DRM subsystem (call to drm_dev_register()) + * and should stay around until this DRM device is unregistered (call to + * drm_dev_unregister()). In other words, private objects lifetime is tied + * to the DRM device lifetime. This implies that: + * + * 1/ all calls to drm_atomic_private_obj_init() must be done before calling + * drm_dev_register() + * 2/ all calls to drm_atomic_private_obj_fini() must be done after calling + * drm_dev_unregister() + * + * If that private object is used to store a state shared by multiple + * CRTCs, proper care must be taken to ensure that non-blocking commits are + * properly ordered to avoid a use-after-free issue. + * + * Indeed, assuming a sequence of two non-blocking &drm_atomic_commit on two + * different &drm_crtc using different &drm_plane and &drm_connector, so with no + * resources shared, there's no guarantee on which commit is going to happen + * first. However, the second &drm_atomic_commit will consider the first + * &drm_private_obj its old state, and will be in charge of freeing it whenever + * the second &drm_atomic_commit is done. + * + * If the first &drm_atomic_commit happens after it, it will consider its + * &drm_private_obj the new state and will be likely to access it, resulting in + * an access to a freed memory region. Drivers should store (and get a reference + * to) the &drm_crtc_commit structure in our private state in + * &drm_mode_config_helper_funcs.atomic_commit_setup, and then wait for that + * commit to complete as the first step of + * &drm_mode_config_helper_funcs.atomic_commit_tail, similar to + * drm_atomic_helper_wait_for_dependencies(). + */ +struct drm_private_obj { + /** + * @head: List entry used to attach a private object to a &drm_device + * (queued to &drm_mode_config.privobj_list). + */ + struct list_head head; + + /** + * @lock: Modeset lock to protect the state object. + */ + struct drm_modeset_lock lock; + + /** + * @state: Current atomic state for this driver private object. + */ + struct drm_private_state *state; + + /** + * @funcs: + * + * Functions to manipulate the state of this driver private object, see + * &drm_private_state_funcs. 
+ */ + const struct drm_private_state_funcs *funcs; +}; + +/** + * drm_for_each_privobj() - private object iterator + * + * @privobj: pointer to the current private object. Updated after each + * iteration + * @dev: the DRM device we want get private objects from + * + * Allows one to iterate over all private objects attached to @dev + */ +#define drm_for_each_privobj(privobj, dev) \ + list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head) + +/** + * struct drm_private_state - base struct for driver private object state + * + * Currently only contains a backpointer to the overall atomic update, + * and the relevant private object but in the future also might hold + * synchronization information similar to e.g. &drm_crtc.commit. + */ +struct drm_private_state { + /** + * @state: backpointer to global drm_atomic_state + */ + struct drm_atomic_state *state; + + /** + * @obj: backpointer to the private object + */ + struct drm_private_obj *obj; +}; + +struct __drm_private_objs_state { + struct drm_private_obj *ptr; + struct drm_private_state *state, *old_state, *new_state; +}; + +/** + * struct drm_atomic_state - the global state object for atomic updates + * @ref: count of all references to this state (will not be freed until zero) + * @dev: parent DRM device + * @async_update: hint for asynchronous plane update + * @planes: pointer to array of structures with per-plane data + * @crtcs: pointer to array of CRTC pointers + * @num_connector: size of the @connectors and @connector_states arrays + * @connectors: pointer to array of structures with per-connector data + * @num_private_objs: size of the @private_objs array + * @private_objs: pointer to array of private object pointers + * @acquire_ctx: acquire context for this atomic modeset state update + * + * States are added to an atomic update by calling drm_atomic_get_crtc_state(), + * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for + * private state structures, drm_atomic_get_private_obj_state(). + */ +struct drm_atomic_state { + struct kref ref; + + struct drm_device *dev; + + /** + * @allow_modeset: + * + * Allow full modeset. This is used by the ATOMIC IOCTL handler to + * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should + * never consult this flag, instead looking at the output of + * drm_atomic_crtc_needs_modeset(). + */ + bool allow_modeset : 1; + /** + * @legacy_cursor_update: + * + * Hint to enforce legacy cursor IOCTL semantics. + * + * WARNING: This is thoroughly broken and pretty much impossible to + * implement correctly. Drivers must ignore this and should instead + * implement &drm_plane_helper_funcs.atomic_async_check and + * &drm_plane_helper_funcs.atomic_async_commit hooks. New users of this + * flag are not allowed. + */ + bool legacy_cursor_update : 1; + bool async_update : 1; + /** + * @duplicated: + * + * Indicates whether or not this atomic state was duplicated using + * drm_atomic_helper_duplicate_state(). Drivers and atomic helpers + * should use this to fixup normal inconsistencies in duplicated + * states. + */ + bool duplicated : 1; + struct __drm_planes_state *planes; + struct __drm_crtcs_state *crtcs; + int num_connector; + struct __drm_connnectors_state *connectors; + int num_private_objs; + struct __drm_private_objs_state *private_objs; + + struct drm_modeset_acquire_ctx *acquire_ctx; + + /** + * @fake_commit: + * + * Used for signaling unbound planes/connectors. 
+ * When a connector or plane is not bound to any CRTC, it's still important + * to preserve linearity to prevent the atomic states from being freed too early. + * + * This commit (if set) is not bound to any CRTC, but will be completed when + * drm_atomic_helper_commit_hw_done() is called. + */ + struct drm_crtc_commit *fake_commit; + + /** + * @commit_work: + * + * Work item which can be used by the driver or helpers to execute the + * commit without blocking. + */ + struct work_struct commit_work; +}; + +void __drm_crtc_commit_free(struct kref *kref); + +/** + * drm_crtc_commit_get - acquire a reference to the CRTC commit + * @commit: CRTC commit + * + * Increases the reference of @commit. + * + * Returns: + * The pointer to @commit, with reference increased. + */ +static inline struct drm_crtc_commit *drm_crtc_commit_get(struct drm_crtc_commit *commit) +{ + kref_get(&commit->ref); + return commit; +} + +/** + * drm_crtc_commit_put - release a reference to the CRTC commit + * @commit: CRTC commit + * + * This releases a reference to @commit which is freed after removing the + * final reference. No locking required and callable from any context. + */ +static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) +{ + kref_put(&commit->ref, __drm_crtc_commit_free); +} + +int drm_crtc_commit_wait(struct drm_crtc_commit *commit); + +struct drm_atomic_state * __must_check +drm_atomic_state_alloc(struct drm_device *dev); +void drm_atomic_state_clear(struct drm_atomic_state *state); + +/** + * drm_atomic_state_get - acquire a reference to the atomic state + * @state: The atomic state + * + * Returns a new reference to @state. + */ +static inline struct drm_atomic_state * +drm_atomic_state_get(struct drm_atomic_state *state) +{ + kref_get(&state->ref); + return state; +} + +void __drm_atomic_state_free(struct kref *ref); + +/** + * drm_atomic_state_put - release a reference to the atomic state + * @state: The atomic state + * + * This releases a reference to @state which is freed after removing the + * final reference. No locking required and callable from any context.
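+ *
+ * A minimal usage sketch (locking and error paths elided; ctx is a
+ * hypothetical, already initialised &struct drm_modeset_acquire_ctx)::
+ *
+ *     struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
+ *
+ *     if (!state)
+ *             return -ENOMEM;
+ *
+ *     state->acquire_ctx = ctx;
+ *
+ *     ... build up the update with drm_atomic_get_crtc_state() and friends ...
+ *
+ *     ret = drm_atomic_commit(state);
+ *     drm_atomic_state_put(state);
+ *     return ret;
+ *
+ * The reference is dropped with drm_atomic_state_put() whether or not the
+ * commit succeeded, since the commit code takes its own references while it
+ * still needs the state.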
+ */ +static inline void drm_atomic_state_put(struct drm_atomic_state *state) +{ + kref_put(&state->ref, __drm_atomic_state_free); +} + +int __must_check +drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state); +void drm_atomic_state_default_clear(struct drm_atomic_state *state); +void drm_atomic_state_default_release(struct drm_atomic_state *state); + +struct drm_crtc_state * __must_check +drm_atomic_get_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc); +struct drm_plane_state * __must_check +drm_atomic_get_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane); +struct drm_connector_state * __must_check +drm_atomic_get_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector); + +void drm_atomic_private_obj_init(struct drm_device *dev, + struct drm_private_obj *obj, + struct drm_private_state *state, + const struct drm_private_state_funcs *funcs); +void drm_atomic_private_obj_fini(struct drm_private_obj *obj); + +struct drm_private_state * __must_check +drm_atomic_get_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj); +struct drm_private_state * +drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj); +struct drm_private_state * +drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj); + +struct drm_connector * +drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); +struct drm_connector * +drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); + +/** + * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists + * @state: global atomic state object + * @crtc: CRTC to grab + * + * This function returns the CRTC state for the given CRTC, or NULL + * if the CRTC is not part of the global atomic state. + * + * This function is deprecated, @drm_atomic_get_old_crtc_state or + * @drm_atomic_get_new_crtc_state should be used instead. + */ +static inline struct drm_crtc_state * +drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].state; +} + +/** + * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists + * @state: global atomic state object + * @crtc: CRTC to grab + * + * This function returns the old CRTC state for the given CRTC, or + * NULL if the CRTC is not part of the global atomic state. + */ +static inline struct drm_crtc_state * +drm_atomic_get_old_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].old_state; +} +/** + * drm_atomic_get_new_crtc_state - get new CRTC state, if it exists + * @state: global atomic state object + * @crtc: CRTC to grab + * + * This function returns the new CRTC state for the given CRTC, or + * NULL if the CRTC is not part of the global atomic state. + */ +static inline struct drm_crtc_state * +drm_atomic_get_new_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].new_state; +} + +/** + * drm_atomic_get_existing_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the plane state for the given plane, or NULL + * if the plane is not part of the global atomic state. 
+ * + * This function is deprecated, @drm_atomic_get_old_plane_state or + * @drm_atomic_get_new_plane_state should be used instead. + */ +static inline struct drm_plane_state * +drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].state; +} + +/** + * drm_atomic_get_old_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the old plane state for the given plane, or + * NULL if the plane is not part of the global atomic state. + */ +static inline struct drm_plane_state * +drm_atomic_get_old_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].old_state; +} + +/** + * drm_atomic_get_new_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the new plane state for the given plane, or + * NULL if the plane is not part of the global atomic state. + */ +static inline struct drm_plane_state * +drm_atomic_get_new_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].new_state; +} + +/** + * drm_atomic_get_existing_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. + * + * This function is deprecated, @drm_atomic_get_old_connector_state or + * @drm_atomic_get_new_connector_state should be used instead. + */ +static inline struct drm_connector_state * +drm_atomic_get_existing_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].state; +} + +/** + * drm_atomic_get_old_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the old connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. + */ +static inline struct drm_connector_state * +drm_atomic_get_old_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].old_state; +} + +/** + * drm_atomic_get_new_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the new connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. + */ +static inline struct drm_connector_state * +drm_atomic_get_new_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].new_state; +} + +/** + * __drm_atomic_get_current_plane_state - get current plane state + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the plane state for the given plane, either from + * @state, or if the plane isn't part of the atomic state update, from @plane. 
+ * This is useful in atomic check callbacks, when drivers need to peek at, but + * not change, state of other planes, since it avoids threading an error code + * back up the call chain. + * + * WARNING: + * + * Note that this function is in general unsafe since it doesn't check for the + * required locking for access state structures. Drivers must ensure that it is + * safe to access the returned state structure through other means. One common + * example is when planes are fixed to a single CRTC, and the driver knows that + * the CRTC lock is held already. In that case holding the CRTC lock gives a + * read-lock on all planes connected to that CRTC. But if planes can be + * reassigned things get more tricky. In that case it's better to use + * drm_atomic_get_plane_state and wire up full error handling. + * + * Returns: + * + * Read-only pointer to the current plane state. + */ +static inline const struct drm_plane_state * +__drm_atomic_get_current_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + if (state->planes[drm_plane_index(plane)].state) + return state->planes[drm_plane_index(plane)].state; + + return plane->state; +} + +int __must_check +drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, + struct drm_encoder *encoder); +int __must_check +drm_atomic_add_affected_connectors(struct drm_atomic_state *state, + struct drm_crtc *crtc); +int __must_check +drm_atomic_add_affected_planes(struct drm_atomic_state *state, + struct drm_crtc *crtc); + +int __must_check drm_atomic_check_only(struct drm_atomic_state *state); +int __must_check drm_atomic_commit(struct drm_atomic_state *state); +int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); + +void drm_state_dump(struct drm_device *dev, struct drm_printer *p); + +/** + * for_each_oldnew_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @old_connector_state: &struct drm_connector_state iteration cursor for the + * old state + * @new_connector_state: &struct drm_connector_state iteration cursor for the + * new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. + */ +#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ + (old_connector_state) = (__state)->connectors[__i].old_state, \ + (new_connector_state) = (__state)->connectors[__i].new_state, 1)) + +/** + * for_each_old_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @old_connector_state: &struct drm_connector_state iteration cursor for the + * old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. 
+ */ +#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ + (old_connector_state) = (__state)->connectors[__i].old_state, 1)) + +/** + * for_each_new_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @new_connector_state: &struct drm_connector_state iteration cursor for the + * new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ + (new_connector_state) = (__state)->connectors[__i].new_state, \ + (void)(new_connector_state) /* Only to avoid unused-but-set-variable warning */, 1)) + +/** + * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state + * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. + */ +#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ + (old_crtc_state) = (__state)->crtcs[__i].old_state, \ + (void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \ + (new_crtc_state) = (__state)->crtcs[__i].new_state, \ + (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) + +/** + * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. 
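+ *
+ * A minimal sketch, assuming a hypothetical my_crtc_disable() driver helper::
+ *
+ *     struct drm_crtc_state *old_crtc_state;
+ *     struct drm_crtc *crtc;
+ *     int i;
+ *
+ *     for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ *             if (old_crtc_state->active)
+ *                     my_crtc_disable(crtc, old_crtc_state);
+ *     }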
+ */ +#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ + (old_crtc_state) = (__state)->crtcs[__i].old_state, 1)) + +/** + * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ + (new_crtc_state) = (__state)->crtcs[__i].new_state, \ + (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) + +/** + * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. + */ +#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ + (old_plane_state) = (__state)->planes[__i].old_state,\ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** + * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic + * update in reverse order + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update in reverse order, + * tracking both old and new state. This is useful in places where the + * state delta needs to be considered, for example in atomic check functions. 
+ */ +#define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \ + for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ + (__i) >= 0; \ + (__i)--) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state,\ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** + * for_each_new_plane_in_state_reverse - other than only tracking new state, + * it's the same as for_each_oldnew_plane_in_state_reverse + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + */ +#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \ + for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ + (__i) >= 0; \ + (__i)--) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** + * for_each_old_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. + */ +#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state, 1)) +/** + * for_each_new_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ + (new_plane_state) = (__state)->planes[__i].new_state, \ + (void)(new_plane_state) /* Only to avoid unused-but-set-variable warning */, 1)) + +/** + * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state + * @new_obj_state: &struct drm_private_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking both + * old and new state. 
This is useful in places where the state delta needs + * to be considered, for example in atomic check functions. + */ +#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) + +/** + * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking only + * the old state. This is useful in disable functions, where we need the old + * state the hardware is still in. + */ +#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ + (__i)++) + +/** + * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @new_obj_state: &struct drm_private_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking only + * the new state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (void)(obj) /* Only to avoid unused-but-set-variable warning */, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) + +/** + * drm_atomic_crtc_needs_modeset - compute combined modeset need + * @state: &drm_crtc_state for the CRTC + * + * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track + * whether the state CRTC changed enough to need a full modeset cycle: + * mode_changed, active_changed and connectors_changed. This helper simply + * combines these three to compute the overall need for a modeset for @state. + * + * The atomic helper code sets these booleans, but drivers can and should + * change them appropriately to accurately represent whether a modeset is + * really needed. In general, drivers should avoid full modesets whenever + * possible. + * + * For example if the CRTC mode has changed, and the hardware is able to enact + * the requested mode change without going through a full modeset, the driver + * should clear mode_changed in its &drm_mode_config_funcs.atomic_check + * implementation. + */ +static inline bool +drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) +{ + return state->mode_changed || state->active_changed || + state->connectors_changed; +} + +/** + * drm_atomic_crtc_effectively_active - compute whether CRTC is actually active + * @state: &drm_crtc_state for the CRTC + * + * When in self refresh mode, the crtc_state->active value will be false, since + * the CRTC is off. 
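A short, hedged sketch of how a CRTC-level atomic check might use drm_atomic_crtc_needs_modeset() from above to skip work on plane-only updates; the foo_ prefix and the 150 MHz limit are made up for illustration.

#include <linux/errno.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state =
                drm_atomic_get_new_crtc_state(state, crtc);

        /* Plane-only updates need no mode/clock re-validation here. */
        if (!drm_atomic_crtc_needs_modeset(crtc_state))
                return 0;

        /* Hypothetical hardware limit: 150 MHz pixel clock. */
        if (crtc_state->mode.clock > 150000)
                return -EINVAL;

        return 0;
}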
However in some cases we're interested in whether the CRTC + * is active, or effectively active (ie: it's connected to an active display). + * In these cases, use this function instead of just checking active. + */ +static inline bool +drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state) +{ + return state->active || state->self_refresh_active; +} + +/** + * struct drm_bus_cfg - bus configuration + * + * This structure stores the configuration of a physical bus between two + * components in an output pipeline, usually between two bridges, an encoder + * and a bridge, or a bridge and a connector. + * + * The bus configuration is stored in &drm_bridge_state separately for the + * input and output buses, as seen from the point of view of each bridge. The + * bus configuration of a bridge output is usually identical to the + * configuration of the next bridge's input, but may differ if the signals are + * modified between the two bridges, for instance by an inverter on the board. + * The input and output configurations of a bridge may differ if the bridge + * modifies the signals internally, for instance by performing format + * conversion, or modifying signals polarities. + */ +struct drm_bus_cfg { + /** + * @format: format used on this bus (one of the MEDIA_BUS_FMT_* format) + * + * This field should not be directly modified by drivers + * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus + * format negotiation). + */ + u32 format; + + /** + * @flags: DRM_BUS_* flags used on this bus + */ + u32 flags; +}; + +/** + * struct drm_bridge_state - Atomic bridge state object + */ +struct drm_bridge_state { + /** + * @base: inherit from &drm_private_state + */ + struct drm_private_state base; + + /** + * @bridge: the bridge this state refers to + */ + struct drm_bridge *bridge; + + /** + * @input_bus_cfg: input bus configuration + */ + struct drm_bus_cfg input_bus_cfg; + + /** + * @output_bus_cfg: input bus configuration + */ + struct drm_bus_cfg output_bus_cfg; +}; + +static inline struct drm_bridge_state * +drm_priv_to_bridge_state(struct drm_private_state *priv) +{ + return container_of(priv, struct drm_bridge_state, base); +} + +struct drm_bridge_state * +drm_atomic_get_bridge_state(struct drm_atomic_state *state, + struct drm_bridge *bridge); +struct drm_bridge_state * +drm_atomic_get_old_bridge_state(struct drm_atomic_state *state, + struct drm_bridge *bridge); +struct drm_bridge_state * +drm_atomic_get_new_bridge_state(struct drm_atomic_state *state, + struct drm_bridge *bridge); + +#endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h new file mode 100644 index 000000000..06d8902a8 --- /dev/null +++ b/include/drm/drm_atomic_helper.h @@ -0,0 +1,248 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_HELPER_H_ +#define DRM_ATOMIC_HELPER_H_ + +#include <drm/drm_crtc.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_modeset_helper.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_util.h> + +/* + * Drivers that don't allow primary plane scaling may pass this macro in place + * of the min/max scale parameters of the plane-state checker function. + * + * Due to src being in 16.16 fixed point and dest being in integer pixels, + * 1<<16 represents no scaling. + */ +#define DRM_PLANE_NO_SCALING (1<<16) + +struct drm_atomic_state; +struct drm_private_obj; +struct drm_private_state; + +int drm_atomic_helper_check_modeset(struct drm_device *dev, + struct drm_atomic_state *state); +int +drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder, + struct drm_connector_state *conn_state); +int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, + const struct drm_crtc_state *crtc_state, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled); +int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state, + bool can_disable_primary_plane); +int drm_atomic_helper_check_planes(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_atomic_helper_check(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); +void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state); +int drm_atomic_helper_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock); +int drm_atomic_helper_async_check(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_async_commit(struct drm_device *dev, + struct drm_atomic_state *state); + +int drm_atomic_helper_wait_for_fences(struct drm_device *dev, + struct drm_atomic_state *state, + bool pre_swap); + +void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void +drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void +drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state); + +void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, + struct drm_atomic_state *old_state); + +int drm_atomic_helper_prepare_planes(struct drm_device *dev, + struct drm_atomic_state *state); + +#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0) +#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1) + +void drm_atomic_helper_commit_planes(struct drm_device *dev, + struct drm_atomic_state *state, + uint32_t flags); +void drm_atomic_helper_cleanup_planes(struct drm_device *dev, + struct drm_atomic_state 
*old_state); +void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); +void +drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, + bool atomic); + +int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall); + +/* nonblocking commit helpers */ +int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, + bool nonblock); +void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state); +void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state); +void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state); +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state); + +/* implementations for legacy interfaces */ +int drm_atomic_helper_update_plane(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx); +int drm_atomic_helper_disable_plane(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx); +int drm_atomic_helper_set_config(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx); + +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); +void drm_atomic_helper_shutdown(struct drm_device *dev); +struct drm_atomic_state * +drm_atomic_helper_duplicate_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); +struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); +int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, + struct drm_modeset_acquire_ctx *ctx); +int drm_atomic_helper_resume(struct drm_device *dev, + struct drm_atomic_state *state); + +int drm_atomic_helper_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + struct drm_modeset_acquire_ctx *ctx); +int drm_atomic_helper_page_flip_target( + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + uint32_t target, + struct drm_modeset_acquire_ctx *ctx); + +/** + * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC + * @plane: the loop cursor + * @crtc: the CRTC whose planes are iterated + * + * This iterates over the current state, useful (for example) when applying + * atomic state after it has been checked and swapped. To iterate over the + * planes which *will* be attached (more useful in code called from + * &drm_mode_config_funcs.atomic_check) see + * drm_atomic_crtc_state_for_each_plane(). + */ +#define drm_atomic_crtc_for_each_plane(plane, crtc) \ + drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask) + +/** + * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state + * @plane: the loop cursor + * @crtc_state: the incoming CRTC state + * + * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be + * attached if the specified state is applied. Useful during for example + * in code called from &drm_mode_config_funcs.atomic_check operations, to + * validate the incoming state. 
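To show how the commit-phase helpers declared above are typically combined, here is a hedged sketch of a driver-specific commit_tail modelled on the ordering of the stock helpers; the foo_ name is hypothetical and DRM_PLANE_COMMIT_ACTIVE_ONLY is used purely as an example policy.

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
{
        struct drm_device *dev = old_state->dev;

        /* Tear down outputs that are being disabled or re-modeset. */
        drm_atomic_helper_commit_modeset_disables(dev, old_state);

        /* Flush plane updates, here only on CRTCs that stay active. */
        drm_atomic_helper_commit_planes(dev, old_state,
                                        DRM_PLANE_COMMIT_ACTIVE_ONLY);

        /* Bring up outputs that are being enabled or re-modeset. */
        drm_atomic_helper_commit_modeset_enables(dev, old_state);

        /* Send events for CRTCs that stay off, then signal completion. */
        drm_atomic_helper_fake_vblank(old_state);
        drm_atomic_helper_commit_hw_done(old_state);

        drm_atomic_helper_wait_for_vblanks(dev, old_state);
        drm_atomic_helper_cleanup_planes(dev, old_state);
}

A function like this would typically be wired into &drm_mode_config_helper_funcs.atomic_commit_tail so the nonblocking commit machinery calls it.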
+ */ +#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ + drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) + +/** + * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state + * @plane: the loop cursor + * @plane_state: loop cursor for the plane's state, must be const + * @crtc_state: the incoming CRTC state + * + * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be + * attached if the specified state is applied. Useful during for example + * in code called from &drm_mode_config_funcs.atomic_check operations, to + * validate the incoming state. + * + * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a + * const plane_state. This is useful when a driver just wants to peek at other + * active planes on this CRTC, but does not need to change it. + */ +#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \ + drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \ + for_each_if ((plane_state = \ + __drm_atomic_get_current_plane_state((crtc_state)->state, \ + plane))) + +/** + * drm_atomic_plane_disabling - check whether a plane is being disabled + * @old_plane_state: old atomic plane state + * @new_plane_state: new atomic plane state + * + * Checks the atomic state of a plane to determine whether it's being disabled + * or not. This also WARNs if it detects an invalid state (both CRTC and FB + * need to either both be NULL or both be non-NULL). + * + * RETURNS: + * True if the plane is being disabled, false otherwise. + */ +static inline bool +drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state, + struct drm_plane_state *new_plane_state) +{ + /* + * When disabling a plane, CRTC and FB should always be NULL together. + * Anything else should be considered a bug in the atomic core, so we + * gently warn about it. + */ + WARN_ON((new_plane_state->crtc == NULL && new_plane_state->fb != NULL) || + (new_plane_state->crtc != NULL && new_plane_state->fb == NULL)); + + return old_plane_state->crtc && !new_plane_state->crtc; +} + +u32 * +drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + +#endif /* DRM_ATOMIC_HELPER_H_ */ diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h new file mode 100644 index 000000000..3f8f1d627 --- /dev/null +++ b/include/drm/drm_atomic_state_helper.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2018 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
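Back in drm_atomic_helper.h, drm_atomic_plane_disabling() defined above is commonly consulted from a plane's atomic_update hook; the following hedged sketch uses a hypothetical foo_ driver and reduces the hardware programming to comments.

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane.h>

static void foo_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state =
                drm_atomic_get_old_plane_state(state, plane);
        struct drm_plane_state *new_state =
                drm_atomic_get_new_plane_state(state, plane);

        if (drm_atomic_plane_disabling(old_state, new_state)) {
                /* ... program the hardware to turn this plane off ... */
                return;
        }

        /* ... otherwise program new_state->fb and the crtc/src rectangles ... */
}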
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#include <linux/types.h> + +struct drm_bridge; +struct drm_bridge_state; +struct drm_crtc; +struct drm_crtc_state; +struct drm_plane; +struct drm_plane_state; +struct drm_connector; +struct drm_connector_state; +struct drm_private_obj; +struct drm_private_state; +struct drm_modeset_acquire_ctx; +struct drm_device; + +void __drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *state, + struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc, + struct drm_crtc_state *state); +void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); +struct drm_crtc_state * +drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state); +void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); + +void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *state, + struct drm_plane *plane); +void __drm_atomic_helper_plane_reset(struct drm_plane *plane, + struct drm_plane_state *state); +void drm_atomic_helper_plane_reset(struct drm_plane *plane); +void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, + struct drm_plane_state *state); +struct drm_plane_state * +drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); +void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state); +void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state); + +void __drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state, + struct drm_connector *connector); +void __drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state); +void drm_atomic_helper_connector_reset(struct drm_connector *connector); +void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector); +void +__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, + struct drm_connector_state *state); +struct drm_connector_state * +drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); +void +__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); +void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state); +void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, + struct drm_private_state *state); + +void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge, + struct drm_bridge_state *state); +struct drm_bridge_state * +drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge); +void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge, + struct drm_bridge_state *state); +void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge, + struct drm_bridge_state *state); +struct drm_bridge_state * +drm_atomic_helper_bridge_reset(struct drm_bridge *bridge); diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h new file mode 100644 index 000000000..4c6d39d7b --- 
/dev/null +++ b/include/drm/drm_atomic_uapi.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * Copyright (C) 2018 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_UAPI_H_ +#define DRM_ATOMIC_UAPI_H_ + +struct drm_crtc_state; +struct drm_display_mode; +struct drm_property_blob; +struct drm_plane_state; +struct drm_crtc; +struct drm_connector_state; +struct dma_fence; +struct drm_framebuffer; + +int __must_check +drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, + const struct drm_display_mode *mode); +int __must_check +drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, + struct drm_property_blob *blob); +int __must_check +drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + struct drm_crtc *crtc); +void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, + struct drm_framebuffer *fb); +int __must_check +drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, + struct drm_crtc *crtc); + +#endif diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h new file mode 100644 index 000000000..0d36bfd1a --- /dev/null +++ b/include/drm/drm_audio_component.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: MIT +// Copyright © 2014 Intel Corporation + +#ifndef _DRM_AUDIO_COMPONENT_H_ +#define _DRM_AUDIO_COMPONENT_H_ + +struct drm_audio_component; +struct device; + +/** + * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver + */ +struct drm_audio_component_ops { + /** + * @owner: drm module to pin down + */ + struct module *owner; + /** + * @get_power: get the POWER_DOMAIN_AUDIO power well + * + * Request the power well to be turned on. + * + * Returns a wakeref cookie to be passed back to the corresponding + * call to @put_power. + */ + unsigned long (*get_power)(struct device *); + /** + * @put_power: put the POWER_DOMAIN_AUDIO power well + * + * Allow the power well to be turned off. + */ + void (*put_power)(struct device *, unsigned long); + /** + * @codec_wake_override: Enable/disable codec wake signal + */ + void (*codec_wake_override)(struct device *, bool enable); + /** + * @get_cdclk_freq: Get the Core Display Clock in kHz + */ + int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. 
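Stepping back to the drm_atomic_uapi.h setters declared just above: an in-kernel user allocates a &drm_atomic_state, applies the setters, and commits. The sketch below is hedged and incomplete (the plane's crtc_*/src_* coordinates would also need to be filled in), and foo_restore_primary is a hypothetical name.

#include <linux/err.h>
#include <linux/errno.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>

static int foo_restore_primary(struct drm_device *dev,
                               struct drm_plane *plane,
                               struct drm_crtc *crtc,
                               struct drm_framebuffer *fb,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_atomic_state *state;
        struct drm_plane_state *plane_state;
        int ret;

        state = drm_atomic_state_alloc(dev);
        if (!state)
                return -ENOMEM;
        state->acquire_ctx = ctx;

        plane_state = drm_atomic_get_plane_state(state, plane);
        if (IS_ERR(plane_state)) {
                ret = PTR_ERR(plane_state);
                goto out;
        }

        ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
        if (ret)
                goto out;
        drm_atomic_set_fb_for_plane(plane_state, fb);
        /* ... also set plane_state->crtc_x/y/w/h and src_x/y/w/h ... */

        ret = drm_atomic_commit(state);
out:
        drm_atomic_state_put(state);
        return ret;
}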
After audio driver sets the + * sample rate, it will call this function to set n/cts + */ + int (*sync_audio_rate)(struct device *, int port, int pipe, int rate); + /** + * @get_eld: fill the audio state and ELD bytes for the given port + * + * Called from audio driver to get the HDMI/DP audio state of the given + * digital port, and also fetch ELD bytes to the given pointer. + * + * It returns the byte size of the original ELD (not the actually + * copied size), zero for an invalid ELD, or a negative error code. + * + * Note that the returned size may be over @max_bytes. Then it + * implies that only a part of ELD has been copied to the buffer. + */ + int (*get_eld)(struct device *, int port, int pipe, bool *enabled, + unsigned char *buf, int max_bytes); +}; + +/** + * struct drm_audio_component_audio_ops - Ops implemented by hda driver, called by DRM driver + */ +struct drm_audio_component_audio_ops { + /** + * @audio_ptr: Pointer to be used in call to pin_eld_notify + */ + void *audio_ptr; + /** + * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed + * + * Called when the DRM driver has set up audio pipeline or has just + * begun to tear it down. This allows the HDA driver to update its + * status accordingly (even when the HDA controller is in power save + * mode). + */ + void (*pin_eld_notify)(void *audio_ptr, int port, int pipe); + /** + * @pin2port: Check and convert from pin node to port number + * + * Called by HDA driver to check and convert from the pin widget node + * number to a port number in the graphics side. + */ + int (*pin2port)(void *audio_ptr, int pin); + /** + * @master_bind: (Optional) component master bind callback + * + * Called at binding master component, for HDA codec-specific + * handling of dynamic binding. + */ + int (*master_bind)(struct device *dev, struct drm_audio_component *); + /** + * @master_unbind: (Optional) component master unbind callback + * + * Called at unbinding master component, for HDA codec-specific + * handling of dynamic unbinding. 
+ */ + void (*master_unbind)(struct device *dev, struct drm_audio_component *); +}; + +/** + * struct drm_audio_component - Used for direct communication between DRM and hda drivers + */ +struct drm_audio_component { + /** + * @dev: DRM device, used as parameter for ops + */ + struct device *dev; + /** + * @ops: Ops implemented by DRM driver, called by hda driver + */ + const struct drm_audio_component_ops *ops; + /** + * @audio_ops: Ops implemented by hda driver, called by DRM driver + */ + const struct drm_audio_component_audio_ops *audio_ops; + /** + * @master_bind_complete: completion held during component master binding + */ + struct completion master_bind_complete; +}; + +#endif /* _DRM_AUDIO_COMPONENT_H_ */ diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h new file mode 100644 index 000000000..ba248ca88 --- /dev/null +++ b/include/drm/drm_auth.h @@ -0,0 +1,161 @@ +#ifndef _DRM_AUTH_H_ +#define _DRM_AUTH_H_ + +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 2016 Intel Corporation + * + * Author: Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/idr.h> +#include <linux/kref.h> +#include <linux/wait.h> + +struct drm_file; +struct drm_hw_lock; + +/* + * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for + * include ordering reasons. + * + * DO NOT USE. + */ +struct drm_lock_data { + struct drm_hw_lock *hw_lock; + struct drm_file *file_priv; + wait_queue_head_t lock_queue; + unsigned long lock_time; + spinlock_t spinlock; + uint32_t kernel_waiters; + uint32_t user_waiters; + int idle_has_lock; +}; + +/** + * struct drm_master - drm master structure + * + * @refcount: Refcount for this master object. + * @dev: Link back to the DRM device + * @driver_priv: Pointer to driver-private information. + * + * Note that master structures are only relevant for the legacy/primary device + * nodes, hence there can only be one per device, not one per drm_minor. + */ +struct drm_master { + struct kref refcount; + struct drm_device *dev; + /** + * @unique: Unique identifier: e.g. busid. Protected by + * &drm_device.master_mutex. + */ + char *unique; + /** + * @unique_len: Length of unique field. Protected by + * &drm_device.master_mutex. + */ + int unique_len; + /** + * @magic_map: Map of used authentication tokens. Protected by + * &drm_device.master_mutex. 
+ */ + struct idr magic_map; + void *driver_priv; + + /** + * @lessor: + * + * Lease grantor, only set if this &struct drm_master represents a + * lessee holding a lease of objects from @lessor. Full owners of the + * device have this set to NULL. + * + * The lessor does not change once it's set in drm_lease_create(), and + * each lessee holds a reference to its lessor that it releases upon + * being destroyed in drm_lease_destroy(). + * + * See also the :ref:`section on display resource leasing + * <drm_leasing>`. + */ + struct drm_master *lessor; + + /** + * @lessee_id: + * + * ID for lessees. Owners (i.e. @lessor is NULL) always have ID 0. + * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. + */ + int lessee_id; + + /** + * @lessee_list: + * + * List entry of lessees of @lessor, where they are linked to @lessees. + * Not used for owners. Protected by &drm_device.mode_config's + * &drm_mode_config.idr_mutex. + */ + struct list_head lessee_list; + + /** + * @lessees: + * + * List of drm_masters leasing from this one. Protected by + * &drm_device.mode_config's &drm_mode_config.idr_mutex. + * + * This list is empty if no leases have been granted, or if all lessees + * have been destroyed. Since lessors are referenced by all their + * lessees, this master cannot be destroyed unless the list is empty. + */ + struct list_head lessees; + + /** + * @leases: + * + * Objects leased to this drm_master. Protected by + * &drm_device.mode_config's &drm_mode_config.idr_mutex. + * + * Objects are leased all together in drm_lease_create(), and are + * removed all together when the lease is revoked. + */ + struct idr leases; + + /** + * @lessee_idr: + * + * All lessees under this owner (only used where @lessor is NULL). + * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. + */ + struct idr lessee_idr; + /* private: */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) + struct drm_lock_data lock; +#endif +}; + +struct drm_master *drm_master_get(struct drm_master *master); +struct drm_master *drm_file_get_master(struct drm_file *file_priv); +void drm_master_put(struct drm_master **master); +bool drm_is_current_master(struct drm_file *fpriv); + +struct drm_master *drm_master_create(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h new file mode 100644 index 000000000..88bdfec3b --- /dev/null +++ b/include/drm/drm_blend.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
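As a small, hedged example of the master lifetime rules documented above: code acting on behalf of the current master takes a reference for the duration of the work and drops it afterwards. The foo_ name and the -EACCES policy are illustrative only.

#include <linux/errno.h>
#include <drm/drm_auth.h>
#include <drm/drm_file.h>

static int foo_do_privileged_op(struct drm_device *dev,
                                struct drm_file *file_priv)
{
        struct drm_master *master;

        if (!drm_is_current_master(file_priv))
                return -EACCES;

        master = drm_file_get_master(file_priv);
        if (!master)
                return -EINVAL;

        /* ... operate on master->unique, master->leases, etc. ... */

        drm_master_put(&master);
        return 0;
}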
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_BLEND_H__ +#define __DRM_BLEND_H__ + +#include <linux/list.h> +#include <linux/ctype.h> +#include <drm/drm_mode.h> + +#define DRM_MODE_BLEND_PREMULTI 0 +#define DRM_MODE_BLEND_COVERAGE 1 +#define DRM_MODE_BLEND_PIXEL_NONE 2 + +struct drm_device; +struct drm_atomic_state; +struct drm_plane; + +static inline bool drm_rotation_90_or_270(unsigned int rotation) +{ + return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270); +} + +#define DRM_BLEND_ALPHA_OPAQUE 0xffff + +int drm_plane_create_alpha_property(struct drm_plane *plane); +int drm_plane_create_rotation_property(struct drm_plane *plane, + unsigned int rotation, + unsigned int supported_rotations); +unsigned int drm_rotation_simplify(unsigned int rotation, + unsigned int supported_rotations); + +int drm_plane_create_zpos_property(struct drm_plane *plane, + unsigned int zpos, + unsigned int min, unsigned int max); +int drm_plane_create_zpos_immutable_property(struct drm_plane *plane, + unsigned int zpos); +int drm_atomic_normalize_zpos(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_plane_create_blend_mode_property(struct drm_plane *plane, + unsigned int supported_modes); +#endif diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h new file mode 100644 index 000000000..a76f4103d --- /dev/null +++ b/include/drm/drm_bridge.h @@ -0,0 +1,980 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
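The drm_blend.h helpers above are normally called once at plane initialization. A hedged sketch of wiring the standard blending-related properties onto a plane follows; the foo_ name, the supported rotations, and the zpos range of 0..3 are arbitrary example choices.

#include <drm/drm_blend.h>
#include <drm/drm_plane.h>

static int foo_plane_add_properties(struct drm_plane *plane)
{
        int ret;

        ret = drm_plane_create_alpha_property(plane);
        if (ret)
                return ret;

        ret = drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
                                                 DRM_MODE_ROTATE_0 |
                                                 DRM_MODE_ROTATE_180 |
                                                 DRM_MODE_REFLECT_X |
                                                 DRM_MODE_REFLECT_Y);
        if (ret)
                return ret;

        ret = drm_plane_create_zpos_property(plane, 0, 0, 3);
        if (ret)
                return ret;

        return drm_plane_create_blend_mode_property(plane,
                        BIT(DRM_MODE_BLEND_PREMULTI) |
                        BIT(DRM_MODE_BLEND_COVERAGE) |
                        BIT(DRM_MODE_BLEND_PIXEL_NONE));
}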
+ */ + +#ifndef __DRM_BRIDGE_H__ +#define __DRM_BRIDGE_H__ + +#include <linux/ctype.h> +#include <linux/list.h> +#include <linux/mutex.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_encoder.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_modes.h> + +struct drm_bridge; +struct drm_bridge_timings; +struct drm_connector; +struct drm_display_info; +struct drm_panel; +struct edid; +struct i2c_adapter; + +/** + * enum drm_bridge_attach_flags - Flags for &drm_bridge_funcs.attach + */ +enum drm_bridge_attach_flags { + /** + * @DRM_BRIDGE_ATTACH_NO_CONNECTOR: When this flag is set the bridge + * shall not create a drm_connector. + */ + DRM_BRIDGE_ATTACH_NO_CONNECTOR = BIT(0), +}; + +/** + * struct drm_bridge_funcs - drm_bridge control functions + */ +struct drm_bridge_funcs { + /** + * @attach: + * + * This callback is invoked whenever our bridge is being attached to a + * &drm_encoder. The flags argument tunes the behaviour of the attach + * operation (see DRM_BRIDGE_ATTACH_*). + * + * The @attach callback is optional. + * + * RETURNS: + * + * Zero on success, error code on failure. + */ + int (*attach)(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags); + + /** + * @detach: + * + * This callback is invoked whenever our bridge is being detached from a + * &drm_encoder. + * + * The @detach callback is optional. + */ + void (*detach)(struct drm_bridge *bridge); + + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in this + * bridge. This should be implemented if the bridge has some sort of + * restriction in the modes it can display. For example, a given bridge + * may be responsible to set a clock value. If the clock can not + * produce all the values for the available modes then this callback + * can be used to restrict the number of modes to only the ones that + * can be displayed. + * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * The @mode_valid callback is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardward constraints. Any further + * limits which depend upon the configuration can only be checked in + * @mode_fixup. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode); + + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. The parameter + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or the next + * &drm_bridge. The parameter adjusted_mode is the input mode the bridge + * requires. It can be modified by this callback and does not need to + * match mode. See also &drm_crtc_state.adjusted_mode for more details. + * + * This is the only hook that allows a bridge to reject a modeset. If + * this function passes all other callbacks must succeed for this + * configuration. + * + * The mode_fixup callback is optional. 
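For the @mode_valid hook documented above, a bridge with a fixed pixel-clock ceiling can filter modes as in this hedged sketch; the foo_ name and the 165 MHz limit are hypothetical.

#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>

static enum drm_mode_status
foo_bridge_mode_valid(struct drm_bridge *bridge,
                      const struct drm_display_info *info,
                      const struct drm_display_mode *mode)
{
        /* mode->clock is in kHz; reject anything above 165 MHz. */
        if (mode->clock > 165000)
                return MODE_CLOCK_HIGH;

        return MODE_OK;
}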
&drm_bridge_funcs.mode_fixup() + * is not called when &drm_bridge_funcs.atomic_check() is implemented, + * so only one of them should be provided. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Drivers MUST + * NOT touch any persistent state (hardware or software) or data + * structures except the passed in @state parameter. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any bridge constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + /** + * @disable: + * + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @disable vfunc. If the preceding element is a &drm_encoder + * it's called right before the &drm_encoder_helper_funcs.disable, + * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms + * hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is still running when this callback is called. + * + * The @disable callback is optional. + * + * NOTE: + * + * This is deprecated, do not use! + * New drivers shall use &drm_bridge_funcs.atomic_disable. + */ + void (*disable)(struct drm_bridge *bridge); + + /** + * @post_disable: + * + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @post_disable function. If the preceding element is a &drm_encoder + * it's called right after the encoder's + * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare + * or &drm_encoder_helper_funcs.dpms hook. + * + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. + * + * The @post_disable callback is optional. + * + * NOTE: + * + * This is deprecated, do not use! + * New drivers shall use &drm_bridge_funcs.atomic_post_disable. + */ + void (*post_disable)(struct drm_bridge *bridge); + + /** + * @mode_set: + * + * This callback should set the given mode on the bridge. It is called + * after the @mode_set callback for the preceding element in the display + * pipeline has been called already. If the bridge is the first element + * then this would be &drm_encoder_helper_funcs.mode_set. The display + * pipe (i.e. clocks and timing signals) is off when this function is + * called. + * + * The adjusted_mode parameter is the mode output by the CRTC for the + * first bridge in the chain. It can be different from the mode + * parameter that contains the desired mode for the connector at the end + * of the bridges chain, for instance when the first bridge in the chain + * performs scaling. 
The adjusted mode is mostly useful for the first + * bridge in the chain and is likely irrelevant for the other bridges. + * + * For atomic drivers the adjusted_mode is the mode stored in + * &drm_crtc_state.adjusted_mode. + * + * NOTE: + * + * This is deprecated, do not use! + * New drivers shall set their mode in the + * &drm_bridge_funcs.atomic_enable operation. + */ + void (*mode_set)(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode); + /** + * @pre_enable: + * + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @pre_enable function. If the preceding element is a + * &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. + * + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. + * + * The @pre_enable callback is optional. + * + * NOTE: + * + * This is deprecated, do not use! + * New drivers shall use &drm_bridge_funcs.atomic_pre_enable. + */ + void (*pre_enable)(struct drm_bridge *bridge); + + /** + * @enable: + * + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's @enable function. If the preceding element is a + * &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is running when this callback is called. This + * callback must enable the display link feeding the next bridge in the + * chain if there is one. + * + * The @enable callback is optional. + * + * NOTE: + * + * This is deprecated, do not use! + * New drivers shall use &drm_bridge_funcs.atomic_enable. + */ + void (*enable)(struct drm_bridge *bridge); + + /** + * @atomic_pre_enable: + * + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @atomic_pre_enable or @pre_enable function. If the preceding + * element is a &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.atomic_enable hook. + * + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from + * &drm_bridge_chain_pre_enable. It would be prudent to also provide an + * implementation of @pre_enable if you are expecting driver calls into + * &drm_bridge_chain_pre_enable. + * + * The @atomic_pre_enable callback is optional. 
+ */ + void (*atomic_pre_enable)(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state); + + /** + * @atomic_enable: + * + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's @atomic_enable or @enable function. If the preceding element + * is a &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.atomic_enable hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is running when this callback is called. This + * callback must enable the display link feeding the next bridge in the + * chain if there is one. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from &drm_bridge_chain_enable. + * It would be prudent to also provide an implementation of @enable if + * you are expecting driver calls into &drm_bridge_chain_enable. + * + * The @atomic_enable callback is optional. + */ + void (*atomic_enable)(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state); + /** + * @atomic_disable: + * + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @atomic_disable or @disable vfunc. If the preceding element + * is a &drm_encoder it's called right before the + * &drm_encoder_helper_funcs.atomic_disable hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is still running when this callback is called. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from + * &drm_bridge_chain_disable. It would be prudent to also provide an + * implementation of @disable if you are expecting driver calls into + * &drm_bridge_chain_disable. + * + * The @atomic_disable callback is optional. + */ + void (*atomic_disable)(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state); + + /** + * @atomic_post_disable: + * + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @atomic_post_disable or @post_disable function. If the preceding + * element is a &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.atomic_disable hook. + * + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from + * &drm_bridge_chain_post_disable. + * It would be prudent to also provide an implementation of + * @post_disable if you are expecting driver calls into + * &drm_bridge_chain_post_disable. + * + * The @atomic_post_disable callback is optional. + */ + void (*atomic_post_disable)(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current bridge state object (which is guaranteed to be + * non-NULL). 
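Putting the atomic hooks and the mandatory state handling together, a minimal bridge funcs table might look like the hedged sketch below; the foo_ implementations are placeholders, and the state callbacks reuse the drm_atomic_helper_bridge_* helpers declared earlier in drm_atomic_state_helper.h.

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>

static void foo_bridge_atomic_enable(struct drm_bridge *bridge,
                                     struct drm_bridge_state *old_bridge_state)
{
        /* ... power up the bridge and start the output link ... */
}

static void foo_bridge_atomic_disable(struct drm_bridge *bridge,
                                      struct drm_bridge_state *old_bridge_state)
{
        /* ... stop the output link and power the bridge down ... */
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
        .atomic_enable          = foo_bridge_atomic_enable,
        .atomic_disable         = foo_bridge_atomic_disable,
        /* Mandatory state handling once any atomic hook is used: */
        .atomic_reset           = drm_atomic_helper_bridge_reset,
        .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_bridge_destroy_state,
};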
+ * + * The atomic_duplicate_state hook is mandatory if the bridge + * implements any of the atomic hooks, and should be left unassigned + * otherwise. For bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_duplicate_state() helper function shall be + * used to implement this hook. + * + * RETURNS: + * A valid drm_bridge_state object or NULL if the allocation fails. + */ + struct drm_bridge_state *(*atomic_duplicate_state)(struct drm_bridge *bridge); + + /** + * @atomic_destroy_state: + * + * Destroy a bridge state object previously allocated by + * &drm_bridge_funcs.atomic_duplicate_state(). + * + * The atomic_destroy_state hook is mandatory if the bridge implements + * any of the atomic hooks, and should be left unassigned otherwise. + * For bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_destroy_state() helper function shall be + * used to implement this hook. + */ + void (*atomic_destroy_state)(struct drm_bridge *bridge, + struct drm_bridge_state *state); + + /** + * @atomic_get_output_bus_fmts: + * + * Return the supported bus formats on the output end of a bridge. + * The returned array must be allocated with kmalloc() and will be + * freed by the caller. If the allocation fails, NULL should be + * returned. num_output_fmts must be set to the returned array size. + * Formats listed in the returned array should be listed in decreasing + * preference order (the core will try all formats until it finds one + * that works). + * + * This method is only called on the last element of the bridge chain + * as part of the bus format negotiation process that happens in + * &drm_atomic_bridge_chain_select_bus_fmts(). + * This method is optional. When not implemented, the core will + * fall back to &drm_connector.display_info.bus_formats[0] if + * &drm_connector.display_info.num_bus_formats > 0, + * or to MEDIA_BUS_FMT_FIXED otherwise. + */ + u32 *(*atomic_get_output_bus_fmts)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts); + + /** + * @atomic_get_input_bus_fmts: + * + * Return the supported bus formats on the input end of a bridge for + * a specific output bus format. + * + * The returned array must be allocated with kmalloc() and will be + * freed by the caller. If the allocation fails, NULL should be + * returned. num_input_fmts must be set to the returned array size. + * Formats listed in the returned array should be listed in decreasing + * preference order (the core will try all formats until it finds one + * that works). When the format is not supported NULL should be + * returned and num_input_fmts should be set to 0. + * + * This method is called on all elements of the bridge chain as part of + * the bus format negotiation process that happens in + * drm_atomic_bridge_chain_select_bus_fmts(). + * This method is optional. When not implemented, the core will bypass + * bus format negotiation on this element of the bridge without + * failing, and the previous element in the chain will be passed + * MEDIA_BUS_FMT_FIXED as its output bus format. + * + * Bridge drivers that need to support being linked to bridges that are + * not supporting bus format negotiation should handle the + * output_fmt == MEDIA_BUS_FMT_FIXED case appropriately, by selecting a + * sensible default value or extracting this information from somewhere + * else (FW property, &drm_display_mode, &drm_display_info, ...) 
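For a bridge whose input is hard-wired to a single format, the @atomic_get_input_bus_fmts contract above reduces to returning a one-entry array, as in this hedged sketch (RGB888 is chosen only as an example):

#include <linux/media-bus-format.h>
#include <linux/slab.h>
#include <drm/drm_bridge.h>

static u32 *
foo_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
                              struct drm_bridge_state *bridge_state,
                              struct drm_crtc_state *crtc_state,
                              struct drm_connector_state *conn_state,
                              u32 output_fmt,
                              unsigned int *num_input_fmts)
{
        u32 *input_fmts;

        /* The core frees this array; allocate it with kmalloc(). */
        input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
        if (!input_fmts) {
                *num_input_fmts = 0;
                return NULL;
        }

        input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
        *num_input_fmts = 1;

        return input_fmts;
}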
+ * + * Note: Even if input format selection on the first bridge has no + * impact on the negotiation process (bus format negotiation stops once + * we reach the first element of the chain), drivers are expected to + * return accurate input formats as the input format may be used to + * configure the CRTC output appropriately. + */ + u32 *(*atomic_get_input_bus_fmts)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + + /** + * @atomic_check: + * + * This method is responsible for checking bridge state correctness. + * It can also check the state of the surrounding components in chain + * to make sure the whole pipeline can work properly. + * + * &drm_bridge_funcs.atomic_check() hooks are called in reverse + * order (from the last to the first bridge). + * + * This method is optional. &drm_bridge_funcs.mode_fixup() is not + * called when &drm_bridge_funcs.atomic_check() is implemented, so only + * one of them should be provided. + * + * If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or + * &drm_bridge_state.output_bus_cfg.flags it should happen in + * this function. By default the &drm_bridge_state.output_bus_cfg.flags + * field is set to the next bridge + * &drm_bridge_state.input_bus_cfg.flags value or + * &drm_connector.display_info.bus_flags if the bridge is the last + * element in the chain. + * + * RETURNS: + * zero if the check passed, a negative error code otherwise. + */ + int (*atomic_check)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + + /** + * @atomic_reset: + * + * Reset the bridge to a predefined state (or retrieve its current + * state) and return a &drm_bridge_state object matching this state. + * This function is called at attach time. + * + * The atomic_reset hook is mandatory if the bridge implements any of + * the atomic hooks, and should be left unassigned otherwise. For + * bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_reset() helper function shall be used to + * implement this hook. + * + * Note that the atomic_reset() semantics is not exactly matching the + * reset() semantics found on other components (connector, plane, ...). + * + * 1. The reset operation happens when the bridge is attached, not when + * drm_mode_config_reset() is called + * 2. It's meant to be used exclusively on bridges that have been + * converted to the ATOMIC API + * + * RETURNS: + * A valid drm_bridge_state object in case of success, an ERR_PTR() + * giving the reason of the failure otherwise. + */ + struct drm_bridge_state *(*atomic_reset)(struct drm_bridge *bridge); + + /** + * @detect: + * + * Check if anything is attached to the bridge output. + * + * This callback is optional, if not implemented the bridge will be + * considered as always having a component attached to its output. + * Bridges that implement this callback shall set the + * DRM_BRIDGE_OP_DETECT flag in their &drm_bridge->ops. + * + * RETURNS: + * + * drm_connector_status indicating the bridge output status. + */ + enum drm_connector_status (*detect)(struct drm_bridge *bridge); + + /** + * @get_modes: + * + * Fill all modes currently valid for the sink into the &drm_connector + * with drm_mode_probed_add(). + * + * The @get_modes callback is mostly intended to support non-probeable + * displays such as many fixed panels. 
Bridges that support reading + * EDID shall leave @get_modes unimplemented and implement the + * &drm_bridge_funcs->get_edid callback instead. + * + * This callback is optional. Bridges that implement it shall set the + * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops. + * + * The connector parameter shall be used for the sole purpose of + * filling modes, and shall not be stored internally by bridge drivers + * for future usage. + * + * RETURNS: + * + * The number of modes added by calling drm_mode_probed_add(). + */ + int (*get_modes)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @get_edid: + * + * Read and parse the EDID data of the connected display. + * + * The @get_edid callback is the preferred way of reporting mode + * information for a display connected to the bridge output. Bridges + * that support reading EDID shall implement this callback and leave + * the @get_modes callback unimplemented. + * + * The caller of this operation shall first verify the output + * connection status and refrain from reading EDID from a disconnected + * output. + * + * This callback is optional. Bridges that implement it shall set the + * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops. + * + * The connector parameter shall be used for the sole purpose of EDID + * retrieval and parsing, and shall not be stored internally by bridge + * drivers for future usage. + * + * RETURNS: + * + * An edid structure newly allocated with kmalloc() (or similar) on + * success, or NULL otherwise. The caller is responsible for freeing + * the returned edid structure with kfree(). + */ + struct edid *(*get_edid)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @hpd_notify: + * + * Notify the bridge of hot plug detection. + * + * This callback is optional, it may be implemented by bridges that + * need to be notified of display connection or disconnection for + * internal reasons. One use case is to reset the internal state of CEC + * controllers for HDMI bridges. + */ + void (*hpd_notify)(struct drm_bridge *bridge, + enum drm_connector_status status); + + /** + * @hpd_enable: + * + * Enable hot plug detection. From now on the bridge shall call + * drm_bridge_hpd_notify() each time a change is detected in the output + * connection status, until hot plug detection gets disabled with + * @hpd_disable. + * + * This callback is optional and shall only be implemented by bridges + * that support hot-plug notification without polling. Bridges that + * implement it shall also implement the @hpd_disable callback and set + * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. + */ + void (*hpd_enable)(struct drm_bridge *bridge); + + /** + * @hpd_disable: + * + * Disable hot plug detection. Once this function returns the bridge + * shall not call drm_bridge_hpd_notify() when a change in the output + * connection status occurs. + * + * This callback is optional and shall only be implemented by bridges + * that support hot-plug notification without polling. Bridges that + * implement it shall also implement the @hpd_enable callback and set + * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. + */ + void (*hpd_disable)(struct drm_bridge *bridge); + + /** + * @debugfs_init: + * + * Allows bridges to create bridge-specific debugfs files. 
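+ *
+ * A minimal, hypothetical sketch (struct foo_bridge and its "powered" field
+ * are assumptions of the example, not part of this API)::
+ *
+ *	static void foo_bridge_debugfs_init(struct drm_bridge *bridge,
+ *					    struct dentry *root)
+ *	{
+ *		struct foo_bridge *foo =
+ *			container_of(bridge, struct foo_bridge, bridge);
+ *
+ *		debugfs_create_bool("powered", 0444, root, &foo->powered);
+ *	}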
+ */ + void (*debugfs_init)(struct drm_bridge *bridge, struct dentry *root); +}; + +/** + * struct drm_bridge_timings - timing information for the bridge + */ +struct drm_bridge_timings { + /** + * @input_bus_flags: + * + * Tells what additional settings for the pixel data on the bus + * this bridge requires (like pixel signal polarity). See also + * &drm_display_info->bus_flags. + */ + u32 input_bus_flags; + /** + * @setup_time_ps: + * + * Defines the time in picoseconds the input data lines must be + * stable before the clock edge. + */ + u32 setup_time_ps; + /** + * @hold_time_ps: + * + * Defines the time in picoseconds taken for the bridge to sample the + * input signal after the clock edge. + */ + u32 hold_time_ps; + /** + * @dual_link: + * + * True if the bus operates in dual-link mode. The exact meaning is + * dependent on the bus type. For LVDS buses, this indicates that even- + * and odd-numbered pixels are received on separate links. + */ + bool dual_link; +}; + +/** + * enum drm_bridge_ops - Bitmask of operations supported by the bridge + */ +enum drm_bridge_ops { + /** + * @DRM_BRIDGE_OP_DETECT: The bridge can detect displays connected to + * its output. Bridges that set this flag shall implement the + * &drm_bridge_funcs->detect callback. + */ + DRM_BRIDGE_OP_DETECT = BIT(0), + /** + * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display + * connected to its output. Bridges that set this flag shall implement + * the &drm_bridge_funcs->get_edid callback. + */ + DRM_BRIDGE_OP_EDID = BIT(1), + /** + * @DRM_BRIDGE_OP_HPD: The bridge can detect hot-plug and hot-unplug + * without requiring polling. Bridges that set this flag shall + * implement the &drm_bridge_funcs->hpd_enable and + * &drm_bridge_funcs->hpd_disable callbacks if they support enabling + * and disabling hot-plug detection dynamically. + */ + DRM_BRIDGE_OP_HPD = BIT(2), + /** + * @DRM_BRIDGE_OP_MODES: The bridge can retrieve the modes supported + * by the display at its output. This does not include reading EDID + * which is separately covered by @DRM_BRIDGE_OP_EDID. Bridges that set + * this flag shall implement the &drm_bridge_funcs->get_modes callback. + */ + DRM_BRIDGE_OP_MODES = BIT(3), +}; + +/** + * struct drm_bridge - central DRM bridge control structure + */ +struct drm_bridge { + /** @base: inherit from &drm_private_object */ + struct drm_private_obj base; + /** @dev: DRM device this bridge belongs to */ + struct drm_device *dev; + /** @encoder: encoder to which this bridge is connected */ + struct drm_encoder *encoder; + /** @chain_node: used to form a bridge chain */ + struct list_head chain_node; +#ifdef CONFIG_OF + /** @of_node: device node pointer to the bridge */ + struct device_node *of_node; +#endif + /** @list: to keep track of all added bridges */ + struct list_head list; + /** + * @timings: + * + * the timing specification for the bridge, if any (may be NULL) + */ + const struct drm_bridge_timings *timings; + /** @funcs: control functions */ + const struct drm_bridge_funcs *funcs; + /** @driver_private: pointer to the bridge driver's internal context */ + void *driver_private; + /** @ops: bitmask of operations supported by the bridge */ + enum drm_bridge_ops ops; + /** + * @type: Type of the connection at the bridge output + * (DRM_MODE_CONNECTOR_*). For bridges at the end of this chain this + * identifies the type of connected display. + */ + int type; + /** + * @interlace_allowed: Indicate that the bridge can handle interlaced + * modes. 
+ */ + bool interlace_allowed; + /** + * @pre_enable_prev_first: The bridge requires that the prev + * bridge @pre_enable function is called before its @pre_enable, + * and conversely for post_disable. This is most frequently a + * requirement for DSI devices which need the host to be initialised + * before the peripheral. + */ + bool pre_enable_prev_first; + /** + * @ddc: Associated I2C adapter for DDC access, if any. + */ + struct i2c_adapter *ddc; + /** private: */ + /** + * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields. + */ + struct mutex hpd_mutex; + /** + * @hpd_cb: Hot plug detection callback, registered with + * drm_bridge_hpd_enable(). + */ + void (*hpd_cb)(void *data, enum drm_connector_status status); + /** + * @hpd_data: Private data passed to the Hot plug detection callback + * @hpd_cb. + */ + void *hpd_data; +}; + +static inline struct drm_bridge * +drm_priv_to_bridge(struct drm_private_obj *priv) +{ + return container_of(priv, struct drm_bridge, base); +} + +void drm_bridge_add(struct drm_bridge *bridge); +int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge); +void drm_bridge_remove(struct drm_bridge *bridge); +int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, + struct drm_bridge *previous, + enum drm_bridge_attach_flags flags); + +#ifdef CONFIG_OF +struct drm_bridge *of_drm_find_bridge(struct device_node *np); +#else +static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np) +{ + return NULL; +} +#endif + +/** + * drm_bridge_get_next_bridge() - Get the next bridge in the chain + * @bridge: bridge object + * + * RETURNS: + * the next bridge in the chain after @bridge, or NULL if @bridge is the last. + */ +static inline struct drm_bridge * +drm_bridge_get_next_bridge(struct drm_bridge *bridge) +{ + if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain)) + return NULL; + + return list_next_entry(bridge, chain_node); +} + +/** + * drm_bridge_get_prev_bridge() - Get the previous bridge in the chain + * @bridge: bridge object + * + * RETURNS: + * the previous bridge in the chain, or NULL if @bridge is the first. + */ +static inline struct drm_bridge * +drm_bridge_get_prev_bridge(struct drm_bridge *bridge) +{ + if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) + return NULL; + + return list_prev_entry(bridge, chain_node); +} + +/** + * drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain + * @encoder: encoder object + * + * RETURNS: + * the first bridge in the chain, or NULL if @encoder has no bridge attached + * to it. + */ +static inline struct drm_bridge * +drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder) +{ + return list_first_entry_or_null(&encoder->bridge_chain, + struct drm_bridge, chain_node); +} + +/** + * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain + * @encoder: the encoder to iterate bridges on + * @bridge: a bridge pointer updated to point to the current bridge at each + * iteration + * + * Iterate over all bridges present in the bridge chain attached to @encoder. 
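+ *
+ * A small, hypothetical usage sketch (picking the first bridge in the chain
+ * that can report hot-plug events)::
+ *
+ *	struct drm_bridge *bridge;
+ *
+ *	drm_for_each_bridge_in_chain(encoder, bridge) {
+ *		if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ *			break;
+ *	}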
+ */ +#define drm_for_each_bridge_in_chain(encoder, bridge) \ + list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node) + +bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +enum drm_mode_status +drm_bridge_chain_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode); +void drm_bridge_chain_disable(struct drm_bridge *bridge); +void drm_bridge_chain_post_disable(struct drm_bridge *bridge); +void drm_bridge_chain_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode); +void drm_bridge_chain_pre_enable(struct drm_bridge *bridge); +void drm_bridge_chain_enable(struct drm_bridge *bridge); + +int drm_atomic_bridge_chain_check(struct drm_bridge *bridge, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); +void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state); + +u32 * +drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + +enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge); +int drm_bridge_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector); +struct edid *drm_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector); +void drm_bridge_hpd_enable(struct drm_bridge *bridge, + void (*cb)(void *data, + enum drm_connector_status status), + void *data); +void drm_bridge_hpd_disable(struct drm_bridge *bridge); +void drm_bridge_hpd_notify(struct drm_bridge *bridge, + enum drm_connector_status status); + +#ifdef CONFIG_DRM_PANEL_BRIDGE +bool drm_bridge_is_panel(const struct drm_bridge *bridge); +struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel); +struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel, + u32 connector_type); +void drm_panel_bridge_remove(struct drm_bridge *bridge); +int drm_panel_bridge_set_orientation(struct drm_connector *connector, + struct drm_bridge *bridge); +struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, + struct drm_panel *panel); +struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev, + struct drm_panel *panel, + u32 connector_type); +struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm, + struct drm_panel *panel); +struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge); +#else +static inline bool drm_bridge_is_panel(const struct drm_bridge *bridge) +{ + return false; +} + +static inline int drm_panel_bridge_set_orientation(struct drm_connector *connector, + struct drm_bridge *bridge) +{ + return -EINVAL; +} +#endif + +#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE) +struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node, + u32 port, u32 endpoint); +struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, struct device_node *node, + u32 port, u32 endpoint); +#else +static inline struct drm_bridge 
*devm_drm_of_get_bridge(struct device *dev, + struct device_node *node, + u32 port, + u32 endpoint) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, + struct device_node *node, + u32 port, + u32 endpoint) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#endif diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h new file mode 100644 index 000000000..33f6c3bbd --- /dev/null +++ b/include/drm/drm_bridge_connector.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#ifndef __DRM_BRIDGE_CONNECTOR_H__ +#define __DRM_BRIDGE_CONNECTOR_H__ + +struct drm_connector; +struct drm_device; +struct drm_encoder; + +void drm_bridge_connector_enable_hpd(struct drm_connector *connector); +void drm_bridge_connector_disable_hpd(struct drm_connector *connector); +struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, + struct drm_encoder *encoder); + +#endif /* __DRM_BRIDGE_CONNECTOR_H__ */ diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h new file mode 100644 index 000000000..572077ff8 --- /dev/null +++ b/include/drm/drm_buddy.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __DRM_BUDDY_H__ +#define __DRM_BUDDY_H__ + +#include <linux/bitops.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/sched.h> + +#include <drm/drm_print.h> + +#define range_overflows(start, size, max) ({ \ + typeof(start) start__ = (start); \ + typeof(size) size__ = (size); \ + typeof(max) max__ = (max); \ + (void)(&start__ == &size__); \ + (void)(&start__ == &max__); \ + start__ >= max__ || size__ > max__ - start__; \ +}) + +#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0) +#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1) + +struct drm_buddy_block { +#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) +#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) +#define DRM_BUDDY_ALLOCATED (1 << 10) +#define DRM_BUDDY_FREE (2 << 10) +#define DRM_BUDDY_SPLIT (3 << 10) +/* Free to be used, if needed in the future */ +#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6) +#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) + u64 header; + + struct drm_buddy_block *left; + struct drm_buddy_block *right; + struct drm_buddy_block *parent; + + void *private; /* owned by creator */ + + /* + * While the block is allocated by the user through drm_buddy_alloc*, + * the user has ownership of the link, for example to maintain within + * a list, if so desired. As soon as the block is freed with + * drm_buddy_free* ownership is given back to the mm. + */ + struct list_head link; + struct list_head tmp_link; +}; + +/* Order-zero must be at least PAGE_SIZE */ +#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT) + +/* + * Binary Buddy System. + * + * Locking should be handled by the user, a simple mutex around + * drm_buddy_alloc* and drm_buddy_free* should suffice. + */ +struct drm_buddy { + /* Maintain a free list for each order. */ + struct list_head *free_list; + + /* + * Maintain explicit binary tree(s) to track the allocation of the + * address space. This gives us a simple way of finding a buddy block + * and performing the potentially recursive merge step when freeing a + * block. Nodes are either allocated or free, in which case they will + * also exist on the respective free list. 
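+	 *
+	 * For orientation only, a minimal usage sketch of the public API
+	 * declared at the end of this header (hypothetical caller, error
+	 * handling trimmed, locking per the note above the struct):
+	 *
+	 *	struct drm_buddy mm;
+	 *	LIST_HEAD(blocks);
+	 *
+	 *	drm_buddy_init(&mm, SZ_1G, PAGE_SIZE);
+	 *	drm_buddy_alloc_blocks(&mm, 0, SZ_1G, SZ_8M, PAGE_SIZE,
+	 *			       &blocks, 0);
+	 *	drm_buddy_free_list(&mm, &blocks);
+	 *	drm_buddy_fini(&mm);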
+ */ + struct drm_buddy_block **roots; + + /* + * Anything from here is public, and remains static for the lifetime of + * the mm. Everything above is considered do-not-touch. + */ + unsigned int n_roots; + unsigned int max_order; + + /* Must be at least PAGE_SIZE */ + u64 chunk_size; + u64 size; + u64 avail; +}; + +static inline u64 +drm_buddy_block_offset(struct drm_buddy_block *block) +{ + return block->header & DRM_BUDDY_HEADER_OFFSET; +} + +static inline unsigned int +drm_buddy_block_order(struct drm_buddy_block *block) +{ + return block->header & DRM_BUDDY_HEADER_ORDER; +} + +static inline unsigned int +drm_buddy_block_state(struct drm_buddy_block *block) +{ + return block->header & DRM_BUDDY_HEADER_STATE; +} + +static inline bool +drm_buddy_block_is_allocated(struct drm_buddy_block *block) +{ + return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED; +} + +static inline bool +drm_buddy_block_is_free(struct drm_buddy_block *block) +{ + return drm_buddy_block_state(block) == DRM_BUDDY_FREE; +} + +static inline bool +drm_buddy_block_is_split(struct drm_buddy_block *block) +{ + return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT; +} + +static inline u64 +drm_buddy_block_size(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + return mm->chunk_size << drm_buddy_block_order(block); +} + +int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size); + +void drm_buddy_fini(struct drm_buddy *mm); + +struct drm_buddy_block * +drm_get_buddy(struct drm_buddy_block *block); + +int drm_buddy_alloc_blocks(struct drm_buddy *mm, + u64 start, u64 end, u64 size, + u64 min_page_size, + struct list_head *blocks, + unsigned long flags); + +int drm_buddy_block_trim(struct drm_buddy *mm, + u64 new_size, + struct list_head *blocks); + +void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block); + +void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects); + +void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); +void drm_buddy_block_print(struct drm_buddy *mm, + struct drm_buddy_block *block, + struct drm_printer *p); + +#endif diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h new file mode 100644 index 000000000..08e0e3ffa --- /dev/null +++ b/include/drm/drm_cache.h @@ -0,0 +1,88 @@ +/************************************************************************** + * + * Copyright 2009 Red Hat Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Authors: + * Dave Airlie <airlied@redhat.com> + */ + +#ifndef _DRM_CACHE_H_ +#define _DRM_CACHE_H_ + +#include <linux/scatterlist.h> + +struct iosys_map; + +void drm_clflush_pages(struct page *pages[], unsigned long num_pages); +void drm_clflush_sg(struct sg_table *st); +void drm_clflush_virt_range(void *addr, unsigned long length); +bool drm_need_swiotlb(int dma_bits); + + +static inline bool drm_arch_can_wc_memory(void) +{ +#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) + return false; +#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON64) + return false; +#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) + /* + * The DRM driver stack is designed to work with cache coherent devices + * only, but permits an optimization to be enabled in some cases, where + * for some buffers, both the CPU and the GPU use uncached mappings, + * removing the need for DMA snooping and allocation in the CPU caches. + * + * The use of uncached GPU mappings relies on the correct implementation + * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU + * will use cached mappings nonetheless. On x86 platforms, this does not + * seem to matter, as uncached CPU mappings will snoop the caches in any + * case. However, on ARM and arm64, enabling this optimization on a + * platform where NoSnoop is ignored results in loss of coherency, which + * breaks correct operation of the device. Since we have no way of + * detecting whether NoSnoop works or not, just disable this + * optimization entirely for ARM and arm64. + */ + return false; +#elif defined(CONFIG_LOONGARCH) + /* + * LoongArch maintains cache coherency in hardware, but its WUC attribute + * (Weak-ordered UnCached, which is similar to WC) is out of the scope of + * cache coherency machanism. This means WUC can only used for write-only + * memory regions. + */ + return false; +#else + return true; +#endif +} + +void drm_memcpy_init_early(void); + +void drm_memcpy_from_wc(struct iosys_map *dst, + const struct iosys_map *src, + unsigned long len); +#endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h new file mode 100644 index 000000000..4fc8018ed --- /dev/null +++ b/include/drm/drm_client.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ + +#ifndef _DRM_CLIENT_H_ +#define _DRM_CLIENT_H_ + +#include <linux/iosys-map.h> +#include <linux/lockdep.h> +#include <linux/mutex.h> +#include <linux/types.h> + +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> + +struct drm_client_dev; +struct drm_device; +struct drm_file; +struct drm_framebuffer; +struct drm_gem_object; +struct drm_minor; +struct module; + +/** + * struct drm_client_funcs - DRM client callbacks + */ +struct drm_client_funcs { + /** + * @owner: The module owner + */ + struct module *owner; + + /** + * @unregister: + * + * Called when &drm_device is unregistered. The client should respond by + * releasing its resources using drm_client_release(). + * + * This callback is optional. + */ + void (*unregister)(struct drm_client_dev *client); + + /** + * @restore: + * + * Called on drm_lastclose(). 
The first client instance in the list that + * returns zero gets the privilege to restore and no more clients are + * called. This callback is not called after @unregister has been called. + * + * Note that the core does not guarantee exclusion against concurrent + * drm_open(). Clients need to ensure this themselves, for example by + * using drm_master_internal_acquire() and + * drm_master_internal_release(). + * + * This callback is optional. + */ + int (*restore)(struct drm_client_dev *client); + + /** + * @hotplug: + * + * Called on drm_kms_helper_hotplug_event(). + * This callback is not called after @unregister has been called. + * + * This callback is optional. + */ + int (*hotplug)(struct drm_client_dev *client); +}; + +/** + * struct drm_client_dev - DRM client instance + */ +struct drm_client_dev { + /** + * @dev: DRM device + */ + struct drm_device *dev; + + /** + * @name: Name of the client. + */ + const char *name; + + /** + * @list: + * + * List of all clients of a DRM device, linked into + * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex. + */ + struct list_head list; + + /** + * @funcs: DRM client functions (optional) + */ + const struct drm_client_funcs *funcs; + + /** + * @file: DRM file + */ + struct drm_file *file; + + /** + * @modeset_mutex: Protects @modesets. + */ + struct mutex modeset_mutex; + + /** + * @modesets: CRTC configurations + */ + struct drm_mode_set *modesets; +}; + +int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, + const char *name, const struct drm_client_funcs *funcs); +void drm_client_release(struct drm_client_dev *client); +void drm_client_register(struct drm_client_dev *client); + +void drm_client_dev_unregister(struct drm_device *dev); +void drm_client_dev_hotplug(struct drm_device *dev); +void drm_client_dev_restore(struct drm_device *dev); + +/** + * struct drm_client_buffer - DRM client buffer + */ +struct drm_client_buffer { + /** + * @client: DRM client + */ + struct drm_client_dev *client; + + /** + * @handle: Buffer handle + */ + u32 handle; + + /** + * @pitch: Buffer pitch + */ + u32 pitch; + + /** + * @gem: GEM object backing this buffer + */ + struct drm_gem_object *gem; + + /** + * @map: Virtual address for the buffer + */ + struct iosys_map map; + + /** + * @fb: DRM framebuffer + */ + struct drm_framebuffer *fb; +}; + +struct drm_client_buffer * +drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); +void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); +int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); +int drm_client_buffer_vmap(struct drm_client_buffer *buffer, + struct iosys_map *map); +void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); + +int drm_client_modeset_create(struct drm_client_dev *client); +void drm_client_modeset_free(struct drm_client_dev *client); +int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height); +bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation); +int drm_client_modeset_check(struct drm_client_dev *client); +int drm_client_modeset_commit_locked(struct drm_client_dev *client); +int drm_client_modeset_commit(struct drm_client_dev *client); +int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); + +/** + * drm_client_for_each_modeset() - Iterate over client modesets + * @modeset: &drm_mode_set loop cursor + * @client: DRM client + */ +#define 
drm_client_for_each_modeset(modeset, client) \ + for (({ lockdep_assert_held(&(client)->modeset_mutex); }), \ + modeset = (client)->modesets; modeset->crtc; modeset++) + +/** + * drm_client_for_each_connector_iter - connector_list iterator macro + * @connector: &struct drm_connector pointer used as cursor + * @iter: &struct drm_connector_list_iter + * + * This iterates the connectors that are useable for internal clients (excludes + * writeback connectors). + * + * For more info see drm_for_each_connector_iter(). + */ +#define drm_client_for_each_connector_iter(connector, iter) \ + drm_for_each_connector_iter(connector, iter) \ + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + +void drm_client_debugfs_init(struct drm_minor *minor); + +#endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h new file mode 100644 index 000000000..81c298488 --- /dev/null +++ b/include/drm/drm_color_mgmt.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_COLOR_MGMT_H__ +#define __DRM_COLOR_MGMT_H__ + +#include <linux/ctype.h> +#include <drm/drm_property.h> + +struct drm_crtc; +struct drm_plane; + +/** + * drm_color_lut_extract - clamp and round LUT entries + * @user_input: input value + * @bit_precision: number of bits the hw LUT supports + * + * Extract a degamma/gamma LUT value provided by user (in the form of + * &drm_color_lut entries) and round it to the precision supported by the + * hardware. + */ +static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision) +{ + u32 val = user_input; + u32 max = 0xffff >> (16 - bit_precision); + + /* Round only if we're not using full precision. */ + if (bit_precision < 16) { + val += 1UL << (16 - bit_precision - 1); + val >>= 16 - bit_precision; + } + + return clamp_val(val, 0, max); +} + +u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n); + +void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, + uint degamma_lut_size, + bool has_ctm, + uint gamma_lut_size); + +int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, + int gamma_size); + +/** + * drm_color_lut_size - calculate the number of entries in the LUT + * @blob: blob containing the LUT + * + * Returns: + * The number of entries in the color LUT stored in @blob. 
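+ *
+ * A brief, hypothetical usage sketch (assumes a gamma LUT blob is attached to
+ * the CRTC state; write_hw_lut_entry() stands in for driver-specific register
+ * access and is not a real helper)::
+ *
+ *	const struct drm_color_lut *lut = crtc_state->gamma_lut->data;
+ *	int i, lut_size = drm_color_lut_size(crtc_state->gamma_lut);
+ *
+ *	for (i = 0; i < lut_size; i++)
+ *		write_hw_lut_entry(i, drm_color_lut_extract(lut[i].red, 10));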
+ */ +static inline int drm_color_lut_size(const struct drm_property_blob *blob) +{ + return blob->length / sizeof(struct drm_color_lut); +} + +enum drm_color_encoding { + DRM_COLOR_YCBCR_BT601, + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_BT2020, + DRM_COLOR_ENCODING_MAX, +}; + +enum drm_color_range { + DRM_COLOR_YCBCR_LIMITED_RANGE, + DRM_COLOR_YCBCR_FULL_RANGE, + DRM_COLOR_RANGE_MAX, +}; + +int drm_plane_create_color_properties(struct drm_plane *plane, + u32 supported_encodings, + u32 supported_ranges, + enum drm_color_encoding default_encoding, + enum drm_color_range default_range); + +/** + * enum drm_color_lut_tests - hw-specific LUT tests to perform + * + * The drm_color_lut_check() function takes a bitmask of the values here to + * determine which tests to apply to a userspace-provided LUT. + */ +enum drm_color_lut_tests { + /** + * @DRM_COLOR_LUT_EQUAL_CHANNELS: + * + * Checks whether the entries of a LUT all have equal values for the + * red, green, and blue channels. Intended for hardware that only + * accepts a single value per LUT entry and assumes that value applies + * to all three color components. + */ + DRM_COLOR_LUT_EQUAL_CHANNELS = BIT(0), + + /** + * @DRM_COLOR_LUT_NON_DECREASING: + * + * Checks whether the entries of a LUT are always flat or increasing + * (never decreasing). + */ + DRM_COLOR_LUT_NON_DECREASING = BIT(1), +}; + +int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests); +#endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h new file mode 100644 index 000000000..4d830fc55 --- /dev/null +++ b/include/drm/drm_connector.h @@ -0,0 +1,1927 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef __DRM_CONNECTOR_H__ +#define __DRM_CONNECTOR_H__ + +#include <linux/list.h> +#include <linux/llist.h> +#include <linux/ctype.h> +#include <linux/hdmi.h> +#include <linux/notifier.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_util.h> + +#include <uapi/drm/drm_mode.h> + +struct drm_connector_helper_funcs; +struct drm_modeset_acquire_ctx; +struct drm_device; +struct drm_crtc; +struct drm_encoder; +struct drm_panel; +struct drm_property; +struct drm_property_blob; +struct drm_printer; +struct drm_privacy_screen; +struct edid; +struct i2c_adapter; + +enum drm_connector_force { + DRM_FORCE_UNSPECIFIED, + DRM_FORCE_OFF, + DRM_FORCE_ON, /* force on analog part normally */ + DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ +}; + +/** + * enum drm_connector_status - status for a &drm_connector + * + * This enum is used to track the connector status. There are no separate + * #defines for the uapi! + */ +enum drm_connector_status { + /** + * @connector_status_connected: The connector is definitely connected to + * a sink device, and can be enabled. + */ + connector_status_connected = 1, + /** + * @connector_status_disconnected: The connector isn't connected to a + * sink device which can be autodetect. For digital outputs like DP or + * HDMI (which can be realiable probed) this means there's really + * nothing there. It is driver-dependent whether a connector with this + * status can be lit up or not. + */ + connector_status_disconnected = 2, + /** + * @connector_status_unknown: The connector's status could not be + * reliably detected. This happens when probing would either cause + * flicker (like load-detection when the connector is in use), or when a + * hardware resource isn't available (like when load-detection needs a + * free CRTC). It should be possible to light up the connector with one + * of the listed fallback modes. For default configuration userspace + * should only try to light up connectors with unknown status when + * there's not connector with @connector_status_connected. + */ + connector_status_unknown = 3, +}; + +/** + * enum drm_connector_registration_state - userspace registration status for + * a &drm_connector + * + * This enum is used to track the status of initializing a connector and + * registering it with userspace, so that DRM can prevent bogus modesets on + * connectors that no longer exist. + */ +enum drm_connector_registration_state { + /** + * @DRM_CONNECTOR_INITIALIZING: The connector has just been created, + * but has yet to be exposed to userspace. There should be no + * additional restrictions to how the state of this connector may be + * modified. + */ + DRM_CONNECTOR_INITIALIZING = 0, + + /** + * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized + * and registered with sysfs, as such it has been exposed to + * userspace. There should be no additional restrictions to how the + * state of this connector may be modified. + */ + DRM_CONNECTOR_REGISTERED = 1, + + /** + * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed + * to userspace and has since been unregistered and removed from + * userspace, or the connector was unregistered before it had a chance + * to be exposed to userspace (e.g. still in the + * @DRM_CONNECTOR_INITIALIZING state). When a connector is + * unregistered, there are additional restrictions to how its state + * may be modified: + * + * - An unregistered connector may only have its DPMS changed from + * On->Off. 
Once DPMS is changed to Off, it may not be switched back + * to On. + * - Modesets are not allowed on unregistered connectors, unless they + * would result in disabling its assigned CRTCs. This means + * disabling a CRTC on an unregistered connector is OK, but enabling + * one is not. + * - Removing a CRTC from an unregistered connector is OK, but new + * CRTCs may never be assigned to an unregistered connector. + */ + DRM_CONNECTOR_UNREGISTERED = 2, +}; + +enum subpixel_order { + SubPixelUnknown = 0, + SubPixelHorizontalRGB, + SubPixelHorizontalBGR, + SubPixelVerticalRGB, + SubPixelVerticalBGR, + SubPixelNone, + +}; + +/** + * struct drm_scrambling: sink's scrambling support. + */ +struct drm_scrambling { + /** + * @supported: scrambling supported for rates > 340 Mhz. + */ + bool supported; + /** + * @low_rates: scrambling supported for rates <= 340 Mhz. + */ + bool low_rates; +}; + +/* + * struct drm_scdc - Information about scdc capabilities of a HDMI 2.0 sink + * + * Provides SCDC register support and capabilities related information on a + * HDMI 2.0 sink. In case of a HDMI 1.4 sink, all parameter must be 0. + */ +struct drm_scdc { + /** + * @supported: status control & data channel present. + */ + bool supported; + /** + * @read_request: sink is capable of generating scdc read request. + */ + bool read_request; + /** + * @scrambling: sink's scrambling capabilities + */ + struct drm_scrambling scrambling; +}; + +/** + * struct drm_hdmi_dsc_cap - DSC capabilities of HDMI sink + * + * Describes the DSC support provided by HDMI 2.1 sink. + * The information is fetched fom additional HFVSDB blocks defined + * for HDMI 2.1. + */ +struct drm_hdmi_dsc_cap { + /** @v_1p2: flag for dsc1.2 version support by sink */ + bool v_1p2; + + /** @native_420: Does sink support DSC with 4:2:0 compression */ + bool native_420; + + /** + * @all_bpp: Does sink support all bpp with 4:4:4: or 4:2:2 + * compressed formats + */ + bool all_bpp; + + /** + * @bpc_supported: compressed bpc supported by sink : 10, 12 or 16 bpc + */ + u8 bpc_supported; + + /** @max_slices: maximum number of Horizontal slices supported by */ + u8 max_slices; + + /** @clk_per_slice : max pixel clock in MHz supported per slice */ + int clk_per_slice; + + /** @max_lanes : dsc max lanes supported for Fixed rate Link training */ + u8 max_lanes; + + /** @max_frl_rate_per_lane : maximum frl rate with DSC per lane */ + u8 max_frl_rate_per_lane; + + /** @total_chunk_kbytes: max size of chunks in KBs supported per line*/ + u8 total_chunk_kbytes; +}; + +/** + * struct drm_hdmi_info - runtime information about the connected HDMI sink + * + * Describes if a given display supports advanced HDMI 2.0 features. + * This information is available in CEA-861-F extension blocks (like HF-VSDB). + */ +struct drm_hdmi_info { + /** @scdc: sink's scdc support and capabilities */ + struct drm_scdc scdc; + + /** + * @y420_vdb_modes: bitmap of modes which can support ycbcr420 + * output only (not normal RGB/YCBCR444/422 outputs). The max VIC + * defined by the CEA-861-G spec is 219, so the size is 256 bits to map + * up to 256 VICs. + */ + unsigned long y420_vdb_modes[BITS_TO_LONGS(256)]; + + /** + * @y420_cmdb_modes: bitmap of modes which can support ycbcr420 + * output also, along with normal HDMI outputs. The max VIC defined by + * the CEA-861-G spec is 219, so the size is 256 bits to map up to 256 + * VICs. 
+ */ + unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)]; + + /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */ + u64 y420_cmdb_map; + + /** @y420_dc_modes: bitmap of deep color support index */ + u8 y420_dc_modes; + + /** @max_frl_rate_per_lane: support fixed rate link */ + u8 max_frl_rate_per_lane; + + /** @max_lanes: supported by sink */ + u8 max_lanes; + + /** @dsc_cap: DSC capabilities of the sink */ + struct drm_hdmi_dsc_cap dsc_cap; +}; + +/** + * enum drm_link_status - connector's link_status property value + * + * This enum is used as the connector's link status property value. + * It is set to the values defined in uapi. + * + * @DRM_LINK_STATUS_GOOD: DP Link is Good as a result of successful + * link training + * @DRM_LINK_STATUS_BAD: DP Link is BAD as a result of link training + * failure + */ +enum drm_link_status { + DRM_LINK_STATUS_GOOD = DRM_MODE_LINK_STATUS_GOOD, + DRM_LINK_STATUS_BAD = DRM_MODE_LINK_STATUS_BAD, +}; + +/** + * enum drm_panel_orientation - panel_orientation info for &drm_display_info + * + * This enum is used to track the (LCD) panel orientation. There are no + * separate #defines for the uapi! + * + * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any + * panel orientation information (normal + * for non panels) in this case the "panel + * orientation" connector prop will not be + * attached. + * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the + * bottom side of the device's casing, iow + * the panel is mounted upside-down. + * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the + * top side of the device's casing. + */ +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + +/** + * struct drm_monitor_range_info - Panel's Monitor range in EDID for + * &drm_display_info + * + * This struct is used to store a frequency range supported by panel + * as parsed from EDID's detailed monitor range descriptor block. + * + * @min_vfreq: This is the min supported refresh rate in Hz from + * EDID's detailed monitor range. + * @max_vfreq: This is the max supported refresh rate in Hz from + * EDID's detailed monitor range + */ +struct drm_monitor_range_info { + u16 min_vfreq; + u16 max_vfreq; +}; + +/** + * struct drm_luminance_range_info - Panel's luminance range for + * &drm_display_info. Calculated using data in EDID + * + * This struct is used to store a luminance range supported by panel + * as calculated using data from EDID's static hdr metadata. + * + * @min_luminance: This is the min supported luminance value + * + * @max_luminance: This is the max supported luminance value + */ +struct drm_luminance_range_info { + u32 min_luminance; + u32 max_luminance; +}; + +/** + * enum drm_privacy_screen_status - privacy screen status + * + * This enum is used to track and control the state of the integrated privacy + * screen present on some display panels, via the "privacy-screen sw-state" + * and "privacy-screen hw-state" properties. Note the _LOCKED enum values + * are only valid for the "privacy-screen hw-state" property. 
+ * + * @PRIVACY_SCREEN_DISABLED: + * The privacy-screen on the panel is disabled + * @PRIVACY_SCREEN_ENABLED: + * The privacy-screen on the panel is enabled + * @PRIVACY_SCREEN_DISABLED_LOCKED: + * The privacy-screen on the panel is disabled and locked (cannot be changed) + * @PRIVACY_SCREEN_ENABLED_LOCKED: + * The privacy-screen on the panel is enabled and locked (cannot be changed) + */ +enum drm_privacy_screen_status { + PRIVACY_SCREEN_DISABLED = 0, + PRIVACY_SCREEN_ENABLED, + PRIVACY_SCREEN_DISABLED_LOCKED, + PRIVACY_SCREEN_ENABLED_LOCKED, +}; + +/* + * This is a consolidated colorimetry list supported by HDMI and + * DP protocol standard. The respective connectors will register + * a property with the subset of this list (supported by that + * respective protocol). Userspace will set the colorspace through + * a colorspace property which will be created and exposed to + * userspace. + */ + +/* For Default case, driver will set the colorspace */ +#define DRM_MODE_COLORIMETRY_DEFAULT 0 +/* CEA 861 Normal Colorimetry options */ +#define DRM_MODE_COLORIMETRY_NO_DATA 0 +#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1 +#define DRM_MODE_COLORIMETRY_BT709_YCC 2 +/* CEA 861 Extended Colorimetry Options */ +#define DRM_MODE_COLORIMETRY_XVYCC_601 3 +#define DRM_MODE_COLORIMETRY_XVYCC_709 4 +#define DRM_MODE_COLORIMETRY_SYCC_601 5 +#define DRM_MODE_COLORIMETRY_OPYCC_601 6 +#define DRM_MODE_COLORIMETRY_OPRGB 7 +#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8 +#define DRM_MODE_COLORIMETRY_BT2020_RGB 9 +#define DRM_MODE_COLORIMETRY_BT2020_YCC 10 +/* Additional Colorimetry extension added as part of CTA 861.G */ +#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11 +#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12 +/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */ +#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13 +#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14 +#define DRM_MODE_COLORIMETRY_BT601_YCC 15 + +/** + * enum drm_bus_flags - bus_flags info for &drm_display_info + * + * This enum defines signal polarities and clock edge information for signals on + * a bus as bitmask flags. + * + * The clock edge information is conveyed by two sets of symbols, + * DRM_BUS_FLAGS_*_DRIVE_\* and DRM_BUS_FLAGS_*_SAMPLE_\*. When this enum is + * used to describe a bus from the point of view of the transmitter, the + * \*_DRIVE_\* flags should be used. When used from the point of view of the + * receiver, the \*_SAMPLE_\* flags should be used. The \*_DRIVE_\* and + * \*_SAMPLE_\* flags alias each other, with the \*_SAMPLE_POSEDGE and + * \*_SAMPLE_NEGEDGE flags being equal to \*_DRIVE_NEGEDGE and \*_DRIVE_POSEDGE + * respectively. This simplifies code as signals are usually sampled on the + * opposite edge of the driving edge. Transmitters and receivers may however + * need to take other signal timings into account to convert between driving + * and sample edges. 
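+ *
+ * As a hypothetical illustration, a receiver (e.g. a panel) that samples data
+ * on the rising clock edge and uses an active-high data enable would describe
+ * its bus as::
+ *
+ *	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ *		     DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ *
+ * which, through the aliasing described above, is the same value as
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE seen from the transmitting side: data
+ * must be driven on the falling edge so it is stable at the next rising edge.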
+ */ +enum drm_bus_flags { + /** + * @DRM_BUS_FLAG_DE_LOW: + * + * The Data Enable signal is active low + */ + DRM_BUS_FLAG_DE_LOW = BIT(0), + + /** + * @DRM_BUS_FLAG_DE_HIGH: + * + * The Data Enable signal is active high + */ + DRM_BUS_FLAG_DE_HIGH = BIT(1), + + /** + * @DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE: + * + * Data is driven on the rising edge of the pixel clock + */ + DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = BIT(2), + + /** + * @DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE: + * + * Data is driven on the falling edge of the pixel clock + */ + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = BIT(3), + + /** + * @DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE: + * + * Data is sampled on the rising edge of the pixel clock + */ + DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, + + /** + * @DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE: + * + * Data is sampled on the falling edge of the pixel clock + */ + DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, + + /** + * @DRM_BUS_FLAG_DATA_MSB_TO_LSB: + * + * Data is transmitted MSB to LSB on the bus + */ + DRM_BUS_FLAG_DATA_MSB_TO_LSB = BIT(4), + + /** + * @DRM_BUS_FLAG_DATA_LSB_TO_MSB: + * + * Data is transmitted LSB to MSB on the bus + */ + DRM_BUS_FLAG_DATA_LSB_TO_MSB = BIT(5), + + /** + * @DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE: + * + * Sync signals are driven on the rising edge of the pixel clock + */ + DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = BIT(6), + + /** + * @DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE: + * + * Sync signals are driven on the falling edge of the pixel clock + */ + DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = BIT(7), + + /** + * @DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE: + * + * Sync signals are sampled on the rising edge of the pixel clock + */ + DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE, + + /** + * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: + * + * Sync signals are sampled on the falling edge of the pixel clock + */ + DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE, + + /** + * @DRM_BUS_FLAG_SHARP_SIGNALS: + * + * Set if the Sharp-specific signals (SPL, CLS, PS, REV) must be used + */ + DRM_BUS_FLAG_SHARP_SIGNALS = BIT(8), +}; + +/** + * struct drm_display_info - runtime data about the connected sink + * + * Describes a given display (e.g. CRT or flat panel) and its limitations. For + * fixed display sinks like built-in panels there's not much difference between + * this and &struct drm_connector. But for sinks with a real cable this + * structure is meant to describe all the things at the other end of the cable. + * + * For sinks which provide an EDID this can be filled out by calling + * drm_add_edid_modes(). + */ +struct drm_display_info { + /** + * @width_mm: Physical width in mm. + */ + unsigned int width_mm; + + /** + * @height_mm: Physical height in mm. + */ + unsigned int height_mm; + + /** + * @bpc: Maximum bits per color channel. Used by HDMI and DP outputs. + */ + unsigned int bpc; + + /** + * @subpixel_order: Subpixel order of LCD panels. + */ + enum subpixel_order subpixel_order; + +#define DRM_COLOR_FORMAT_RGB444 (1<<0) +#define DRM_COLOR_FORMAT_YCBCR444 (1<<1) +#define DRM_COLOR_FORMAT_YCBCR422 (1<<2) +#define DRM_COLOR_FORMAT_YCBCR420 (1<<3) + + /** + * @panel_orientation: Read only connector property for built-in panels, + * indicating the orientation of the panel vs the device's casing. + * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN. + * When not UNKNOWN this gets used by the drm_fb_helpers to rotate the + * fb to compensate and gets exported as prop to userspace. 
+ */ + int panel_orientation; + + /** + * @color_formats: HDMI Color formats, selects between RGB and YCrCb + * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones + * as used to describe the pixel format in framebuffers, and also don't + * match the formats in @bus_formats which are shared with v4l. + */ + u32 color_formats; + + /** + * @bus_formats: Pixel data format on the wire, somewhat redundant with + * @color_formats. Array of size @num_bus_formats encoded using + * MEDIA_BUS_FMT\_ defines shared with v4l and media drivers. + */ + const u32 *bus_formats; + /** + * @num_bus_formats: Size of @bus_formats array. + */ + unsigned int num_bus_formats; + + /** + * @bus_flags: Additional information (like pixel signal polarity) for + * the pixel data on the bus, using &enum drm_bus_flags values + * DRM_BUS_FLAGS\_. + */ + u32 bus_flags; + + /** + * @max_tmds_clock: Maximum TMDS clock rate supported by the + * sink in kHz. 0 means undefined. + */ + int max_tmds_clock; + + /** + * @dvi_dual: Dual-link DVI sink? + */ + bool dvi_dual; + + /** + * @is_hdmi: True if the sink is an HDMI device. + * + * This field shall be used instead of calling + * drm_detect_hdmi_monitor() when possible. + */ + bool is_hdmi; + + /** + * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? + */ + bool has_hdmi_infoframe; + + /** + * @rgb_quant_range_selectable: Does the sink support selecting + * the RGB quantization range? + */ + bool rgb_quant_range_selectable; + + /** + * @edid_hdmi_rgb444_dc_modes: Mask of supported hdmi deep color modes + * in RGB 4:4:4. Even more stuff redundant with @bus_formats. + */ + u8 edid_hdmi_rgb444_dc_modes; + + /** + * @edid_hdmi_ycbcr444_dc_modes: Mask of supported hdmi deep color + * modes in YCbCr 4:4:4. Even more stuff redundant with @bus_formats. + */ + u8 edid_hdmi_ycbcr444_dc_modes; + + /** + * @cea_rev: CEA revision of the HDMI sink. + */ + u8 cea_rev; + + /** + * @hdmi: advance features of a HDMI sink. + */ + struct drm_hdmi_info hdmi; + + /** + * @non_desktop: Non desktop display (HMD). + */ + bool non_desktop; + + /** + * @monitor_range: Frequency range supported by monitor range descriptor + */ + struct drm_monitor_range_info monitor_range; + + /** + * @luminance_range: Luminance range supported by panel + */ + struct drm_luminance_range_info luminance_range; + + /** + * @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from + * the DisplayID VESA vendor block. 0 for conventional Single-Stream + * Transport (SST), or 2 or 4 MSO streams. + */ + u8 mso_stream_count; + + /** + * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels. + */ + u8 mso_pixel_overlap; + + /** + * @max_dsc_bpp: Maximum DSC target bitrate, if it is set to 0 the + * monitor's default value is used instead. + */ + u32 max_dsc_bpp; +}; + +int drm_display_info_set_bus_formats(struct drm_display_info *info, + const u32 *formats, + unsigned int num_formats); + +/** + * struct drm_connector_tv_margins - TV connector related margins + * + * Describes the margins in pixels to put around the image on TV + * connectors to deal with overscan. + */ +struct drm_connector_tv_margins { + /** + * @bottom: Bottom margin in pixels. + */ + unsigned int bottom; + + /** + * @left: Left margin in pixels. + */ + unsigned int left; + + /** + * @right: Right margin in pixels. + */ + unsigned int right; + + /** + * @top: Top margin in pixels. 
+ */ + unsigned int top; +}; + +/** + * struct drm_tv_connector_state - TV connector related states + * @subconnector: selected subconnector + * @margins: TV margins + * @mode: TV mode + * @brightness: brightness in percent + * @contrast: contrast in percent + * @flicker_reduction: flicker reduction in percent + * @overscan: overscan in percent + * @saturation: saturation in percent + * @hue: hue in percent + */ +struct drm_tv_connector_state { + enum drm_mode_subconnector subconnector; + struct drm_connector_tv_margins margins; + unsigned int mode; + unsigned int brightness; + unsigned int contrast; + unsigned int flicker_reduction; + unsigned int overscan; + unsigned int saturation; + unsigned int hue; +}; + +/** + * struct drm_connector_state - mutable connector state + */ +struct drm_connector_state { + /** @connector: backpointer to the connector */ + struct drm_connector *connector; + + /** + * @crtc: CRTC to connect connector to, NULL if disabled. + * + * Do not change this directly, use drm_atomic_set_crtc_for_connector() + * instead. + */ + struct drm_crtc *crtc; + + /** + * @best_encoder: + * + * Used by the atomic helpers to select the encoder, through the + * &drm_connector_helper_funcs.atomic_best_encoder or + * &drm_connector_helper_funcs.best_encoder callbacks. + * + * This is also used in the atomic helpers to map encoders to their + * current and previous connectors, see + * drm_atomic_get_old_connector_for_encoder() and + * drm_atomic_get_new_connector_for_encoder(). + * + * NOTE: Atomic drivers must fill this out (either themselves or through + * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will + * not return correct data to userspace. + */ + struct drm_encoder *best_encoder; + + /** + * @link_status: Connector link_status to keep track of whether link is + * GOOD or BAD to notify userspace if retraining is necessary. + */ + enum drm_link_status link_status; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; + + /** + * @commit: Tracks the pending commit to prevent use-after-free conditions. + * + * Is only set when @crtc is NULL. + */ + struct drm_crtc_commit *commit; + + /** @tv: TV connector state */ + struct drm_tv_connector_state tv; + + /** + * @self_refresh_aware: + * + * This tracks whether a connector is aware of the self refresh state. + * It should be set to true for those connector implementations which + * understand the self refresh state. This is needed since the crtc + * registers the self refresh helpers and it doesn't know if the + * connectors downstream have implemented self refresh entry/exit. + * + * Drivers should set this to true in atomic_check if they know how to + * handle self_refresh requests. + */ + bool self_refresh_aware; + + /** + * @picture_aspect_ratio: Connector property to control the + * HDMI infoframe aspect ratio setting. + * + * The %DRM_MODE_PICTURE_ASPECT_\* values much match the + * values for &enum hdmi_picture_aspect + */ + enum hdmi_picture_aspect picture_aspect_ratio; + + /** + * @content_type: Connector property to control the + * HDMI infoframe content type setting. + * The %DRM_MODE_CONTENT_TYPE_\* values much + * match the values. + */ + unsigned int content_type; + + /** + * @hdcp_content_type: Connector property to pass the type of + * protected content. This is most commonly used for HDCP. + */ + unsigned int hdcp_content_type; + + /** + * @scaling_mode: Connector property to control the + * upscaling, mostly used for built-in panels. 
+ */ + unsigned int scaling_mode; + + /** + * @content_protection: Connector property to request content + * protection. This is most commonly used for HDCP. + */ + unsigned int content_protection; + + /** + * @colorspace: State variable for Connector property to request + * colorspace change on Sink. This is most commonly used to switch + * to wider color gamuts like BT2020. + */ + u32 colorspace; + + /** + * @writeback_job: Writeback job for writeback connectors + * + * Holds the framebuffer and out-fence for a writeback connector. As + * the writeback completion may be asynchronous to the normal commit + * cycle, the writeback job lifetime is managed separately from the + * normal atomic state by this object. + * + * See also: drm_writeback_queue_job() and + * drm_writeback_signal_completion() + */ + struct drm_writeback_job *writeback_job; + + /** + * @max_requested_bpc: Connector property to limit the maximum bit + * depth of the pixels. + */ + u8 max_requested_bpc; + + /** + * @max_bpc: Connector max_bpc based on the requested max_bpc property + * and the connector bpc limitations obtained from edid. + */ + u8 max_bpc; + + /** + * @privacy_screen_sw_state: See :ref:`Standard Connector + * Properties<standard_connector_properties>` + */ + enum drm_privacy_screen_status privacy_screen_sw_state; + + /** + * @hdr_output_metadata: + * DRM blob property for HDR output metadata + */ + struct drm_property_blob *hdr_output_metadata; +}; + +/** + * struct drm_connector_funcs - control connectors on a given device + * + * Each CRTC may have one or more connectors attached to it. The functions + * below allow the core DRM code to control connectors, enumerate available modes, + * etc. + */ +struct drm_connector_funcs { + /** + * @dpms: + * + * Legacy entry point to set the per-connector DPMS state. Legacy DPMS + * is exposed as a standard property on the connector, but diverted to + * this callback in the drm core. Note that atomic drivers don't + * implement the 4 level DPMS support on the connector any more, but + * instead only have an on/off "ACTIVE" property on the CRTC object. + * + * This hook is not used by atomic drivers, remapping of the legacy DPMS + * property is entirely handled in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*dpms)(struct drm_connector *connector, int mode); + + /** + * @reset: + * + * Reset connector hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_connector_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_connector *connector); + + /** + * @detect: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. + * + * This callback is optional, if not implemented the connector will be + * considered as always being attached. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + * + * Note that the helper library will already hold + * &drm_mode_config.connection_mutex. 
Drivers which need to grab additional + * locks to avoid races with concurrent modeset changes need to use + * &drm_connector_helper_funcs.detect_ctx instead. + * + * Also note that this callback can be called no matter the + * state the connector is in. Drivers that need the underlying + * device to be powered to perform the detection will first need + * to make sure it's been properly enabled. + * + * RETURNS: + * + * drm_connector_status indicating the connector's status. + */ + enum drm_connector_status (*detect)(struct drm_connector *connector, + bool force); + + /** + * @force: + * + * This function is called to update internal encoder state when the + * connector is forced to a certain state by userspace, either through + * the sysfs interfaces or on the kernel cmdline. In that case the + * @detect callback isn't called. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + */ + void (*force)(struct drm_connector *connector); + + /** + * @fill_modes: + * + * Entry point for output detection and basic mode validation. The + * driver should reprobe the output if needed (e.g. when hotplug + * handling is unreliable), add all detected modes to &drm_connector.modes + * and filter out any the device can't support in any configuration. It + * also needs to filter out any modes wider or higher than the + * parameters max_width and max_height indicate. + * + * The drivers must also prune any modes no longer valid from + * &drm_connector.modes. Furthermore it must update + * &drm_connector.status and &drm_connector.edid. If no EDID has been + * received for this output connector->edid must be NULL. + * + * Drivers using the probe helpers should use + * drm_helper_probe_single_connector_modes() to implement this + * function. + * + * RETURNS: + * + * The number of modes detected and filled into &drm_connector.modes. + */ + int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * connector. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_connector *connector, struct drm_property *property, + uint64_t val); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the connector, light backlight control, i2c, + * DP aux or similar interfaces. It is called late in the driver load + * sequence from drm_connector_register() when registering all the + * core drm connector interfaces. Everything added from this callback + * should be unregistered in the early_unregister callback. + * + * This is called while holding &drm_connector.mutex. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_connector *connector); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the connector from + * late_register(). 
It is called from drm_connector_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + * + * This is called while holding &drm_connector.mutex. + */ + void (*early_unregister)(struct drm_connector *connector); + + /** + * @destroy: + * + * Clean up connector resources. This is called at driver unload time + * through drm_mode_config_cleanup(). It can also be called at runtime + * when a connector is being hot-unplugged for drivers that support + * connector hotplugging (e.g. DisplayPort MST). + */ + void (*destroy)(struct drm_connector *connector); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this connector and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_connector_state should use + * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before &drm_connector.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. + */ + void (*atomic_destroy_state)(struct drm_connector *connector, + struct drm_connector_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_connector_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. 
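+ *
+ * A minimal sketch of the decode step itself, where the foo_* names are
+ * placeholders for a driver's subclassed state and private property::
+ *
+ *        struct foo_connector *fc = to_foo_connector(connector);
+ *
+ *        if (property == fc->sharpness_property) {
+ *                to_foo_connector_state(state)->sharpness = val;
+ *                return 0;
+ *        }
+ *        return -EINVAL;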
+ * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this connector). No other validation + * is allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ + int (*atomic_set_property)(struct drm_connector *connector, + struct drm_connector_state *state, + struct drm_property *property, + uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCONNECTOR IOCTL. + * + * Do not call this function directly, use + * drm_atomic_connector_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which shouldn't ever happen, the core only asks for + * properties attached to this connector). + */ + int (*atomic_get_property)(struct drm_connector *connector, + const struct drm_connector_state *state, + struct drm_property *property, + uint64_t *val); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_connector_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_connector_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_connector_state *state); + + /** + * @oob_hotplug_event: + * + * This will get called when a hotplug-event for a drm-connector + * has been received from a source outside the display driver / device. + */ + void (*oob_hotplug_event)(struct drm_connector *connector); + + /** + * @debugfs_init: + * + * Allows connectors to create connector-specific debugfs files. + */ + void (*debugfs_init)(struct drm_connector *connector, struct dentry *root); +}; + +/** + * struct drm_cmdline_mode - DRM Mode passed through the kernel command-line + * + * Each connector can have an initial mode with additional options + * passed through the kernel command line. This structure allows to + * express those parameters and will be filled by the command-line + * parser. + */ +struct drm_cmdline_mode { + /** + * @name: + * + * Name of the mode. + */ + char name[DRM_DISPLAY_MODE_LEN]; + + /** + * @specified: + * + * Has a mode been read from the command-line? + */ + bool specified; + + /** + * @refresh_specified: + * + * Did the mode have a preferred refresh rate? + */ + bool refresh_specified; + + /** + * @bpp_specified: + * + * Did the mode have a preferred BPP? + */ + bool bpp_specified; + + /** + * @xres: + * + * Active resolution on the X axis, in pixels. + */ + int xres; + + /** + * @yres: + * + * Active resolution on the Y axis, in pixels. + */ + int yres; + + /** + * @bpp: + * + * Bits per pixels for the mode. + */ + int bpp; + + /** + * @refresh: + * + * Refresh rate, in Hertz. + */ + int refresh; + + /** + * @rb: + * + * Do we need to use reduced blanking? + */ + bool rb; + + /** + * @interlace: + * + * The mode is interlaced. 
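+ *
+ * For example, a kernel command line of ``video=HDMI-A-1:1920x1080i``
+ * (connector name chosen only for illustration) would set this flag.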
+ */ + bool interlace; + + /** + * @cvt: + * + * The timings will be calculated using the VESA Coordinated + * Video Timings instead of looking up the mode from a table. + */ + bool cvt; + + /** + * @margins: + * + * Add margins to the mode calculation (1.8% of xres rounded + * down to 8 pixels and 1.8% of yres). + */ + bool margins; + + /** + * @force: + * + * Ignore the hotplug state of the connector, and force its + * state to one of the DRM_FORCE_* values. + */ + enum drm_connector_force force; + + /** + * @rotation_reflection: + * + * Initial rotation and reflection of the mode setup from the + * command line. See DRM_MODE_ROTATE_* and + * DRM_MODE_REFLECT_*. The only rotations supported are + * DRM_MODE_ROTATE_0 and DRM_MODE_ROTATE_180. + */ + unsigned int rotation_reflection; + + /** + * @panel_orientation: + * + * drm-connector "panel orientation" property override value, + * DRM_MODE_PANEL_ORIENTATION_UNKNOWN if not set. + */ + enum drm_panel_orientation panel_orientation; + + /** + * @tv_margins: TV margins to apply to the mode. + */ + struct drm_connector_tv_margins tv_margins; +}; + +/** + * struct drm_connector - central DRM connector control structure + * + * Each connector may be connected to one or more CRTCs, or may be clonable by + * another connector if they can share a CRTC. Each connector also has a specific + * position in the broader display (referred to as a 'screen' though it could + * span multiple monitors). + */ +struct drm_connector { + /** @dev: parent DRM device */ + struct drm_device *dev; + /** @kdev: kernel device for sysfs attributes */ + struct device *kdev; + /** @attr: sysfs attributes */ + struct device_attribute *attr; + /** + * @fwnode: associated fwnode supplied by platform firmware + * + * Drivers can set this to associate a fwnode with a connector, drivers + * are expected to get a reference on the fwnode when setting this. + * drm_connector_cleanup() will call fwnode_handle_put() on this. + */ + struct fwnode_handle *fwnode; + + /** + * @head: + * + * List of all connectors on a @dev, linked from + * &drm_mode_config.connector_list. Protected by + * &drm_mode_config.connector_list_lock, but please only use + * &drm_connector_list_iter to walk this list. + */ + struct list_head head; + + /** + * @global_connector_list_entry: + * + * Connector entry in the global connector-list, used by + * drm_connector_find_by_fwnode(). + */ + struct list_head global_connector_list_entry; + + /** @base: base KMS object */ + struct drm_mode_object base; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: Lock for general connector state, but currently only protects + * @registered. Most of the connector state is still protected by + * &drm_mode_config.mutex. + */ + struct mutex mutex; + + /** + * @index: Compacted connector index, which matches the position inside + * the mode_config.list for drivers not supporting hot-add/removing. Can + * be used as an array index. It is invariant over the lifetime of the + * connector. + */ + unsigned index; + + /** + * @connector_type: + * one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h + */ + int connector_type; + /** @connector_type_id: index into connector type enum */ + int connector_type_id; + /** + * @interlace_allowed: + * Can this connector handle interlaced modes? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ + bool interlace_allowed; + /** + * @doublescan_allowed: + * Can this connector handle doublescan? 
Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ + bool doublescan_allowed; + /** + * @stereo_allowed: + * Can this connector handle stereo modes? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ + bool stereo_allowed; + + /** + * @ycbcr_420_allowed : This bool indicates if this connector is + * capable of handling YCBCR 420 output. While parsing the EDID + * blocks it's very helpful to know if the source is capable of + * handling YCBCR 420 outputs. + */ + bool ycbcr_420_allowed; + + /** + * @registration_state: Is this connector initializing, exposed + * (registered) with userspace, or unregistered? + * + * Protected by @mutex. + */ + enum drm_connector_registration_state registration_state; + + /** + * @modes: + * Modes available on this connector (from fill_modes() + user). + * Protected by &drm_mode_config.mutex. + */ + struct list_head modes; + + /** + * @status: + * One of the drm_connector_status enums (connected, not, or unknown). + * Protected by &drm_mode_config.mutex. + */ + enum drm_connector_status status; + + /** + * @probed_modes: + * These are modes added by probing with DDC or the BIOS, before + * filtering is applied. Used by the probe helpers. Protected by + * &drm_mode_config.mutex. + */ + struct list_head probed_modes; + + /** + * @display_info: Display information is filled from EDID information + * when a display is detected. For non hot-pluggable displays such as + * flat panels in embedded systems, the driver should initialize the + * &drm_display_info.width_mm and &drm_display_info.height_mm fields + * with the physical size of the display. + * + * Protected by &drm_mode_config.mutex. + */ + struct drm_display_info display_info; + + /** @funcs: connector control functions */ + const struct drm_connector_funcs *funcs; + + /** + * @edid_blob_ptr: DRM property containing EDID if present. Protected by + * &drm_mode_config.mutex. This should be updated only by calling + * drm_connector_update_edid_property(). + */ + struct drm_property_blob *edid_blob_ptr; + + /** @properties: property tracking for this connector */ + struct drm_object_properties properties; + + /** + * @scaling_mode_property: Optional atomic property to control the + * upscaling. See drm_connector_attach_content_protection_property(). + */ + struct drm_property *scaling_mode_property; + + /** + * @vrr_capable_property: Optional property to help userspace + * query hardware support for variable refresh rate on a connector. + * connector. Drivers can add the property to a connector by + * calling drm_connector_attach_vrr_capable_property(). + * + * This should be updated only by calling + * drm_connector_set_vrr_capable_property(). + */ + struct drm_property *vrr_capable_property; + + /** + * @colorspace_property: Connector property to set the suitable + * colorspace supported by the sink. + */ + struct drm_property *colorspace_property; + + /** + * @path_blob_ptr: + * + * DRM blob property data for the DP MST path property. This should only + * be updated by calling drm_connector_set_path_property(). + */ + struct drm_property_blob *path_blob_ptr; + + /** + * @max_bpc_property: Default connector property for the max bpc to be + * driven out of the connector. + */ + struct drm_property *max_bpc_property; + + /** @privacy_screen: drm_privacy_screen for this connector, or NULL. 
*/ + struct drm_privacy_screen *privacy_screen; + + /** @privacy_screen_notifier: privacy-screen notifier_block */ + struct notifier_block privacy_screen_notifier; + + /** + * @privacy_screen_sw_state_property: Optional atomic property for the + * connector to control the integrated privacy screen. + */ + struct drm_property *privacy_screen_sw_state_property; + + /** + * @privacy_screen_hw_state_property: Optional atomic property for the + * connector to report the actual integrated privacy screen state. + */ + struct drm_property *privacy_screen_hw_state_property; + +#define DRM_CONNECTOR_POLL_HPD (1 << 0) +#define DRM_CONNECTOR_POLL_CONNECT (1 << 1) +#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) + + /** + * @polled: + * + * Connector polling mode, a combination of + * + * DRM_CONNECTOR_POLL_HPD + * The connector generates hotplug events and doesn't need to be + * periodically polled. The CONNECT and DISCONNECT flags must not + * be set together with the HPD flag. + * + * DRM_CONNECTOR_POLL_CONNECT + * Periodically poll the connector for connection. + * + * DRM_CONNECTOR_POLL_DISCONNECT + * Periodically poll the connector for disconnection, without + * causing flickering even when the connector is in use. DACs should + * rarely do this without a lot of testing. + * + * Set to 0 for connectors that don't support connection status + * discovery. + */ + uint8_t polled; + + /** + * @dpms: Current dpms state. For legacy drivers the + * &drm_connector_funcs.dpms callback must update this. For atomic + * drivers, this is handled by the core atomic code, and drivers must + * only take &drm_crtc_state.active into account. + */ + int dpms; + + /** @helper_private: mid-layer private data */ + const struct drm_connector_helper_funcs *helper_private; + + /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */ + struct drm_cmdline_mode cmdline_mode; + /** @force: a DRM_FORCE_<foo> state for forced mode sets */ + enum drm_connector_force force; + /** + * @override_edid: has the EDID been overwritten through debugfs for + * testing? Do not modify outside of drm_edid_override_set() and + * drm_edid_override_reset(). + */ + bool override_edid; + /** @epoch_counter: used to detect any other changes in connector, besides status */ + u64 epoch_counter; + + /** + * @possible_encoders: Bit mask of encoders that can drive this + * connector, drm_encoder_index() determines the index into the bitfield + * and the bits are set with drm_connector_attach_encoder(). + */ + u32 possible_encoders; + + /** + * @encoder: Currently bound encoder driving this connector, if any. + * Only really meaningful for non-atomic drivers. Atomic drivers should + * instead look at &drm_connector_state.best_encoder, and in case they + * need the CRTC driving this output, &drm_connector_state.crtc. + */ + struct drm_encoder *encoder; + +#define MAX_ELD_BYTES 128 + /** @eld: EDID-like data, if present */ + uint8_t eld[MAX_ELD_BYTES]; + /** @latency_present: AV delay info from ELD, if found */ + bool latency_present[2]; + /** + * @video_latency: Video latency info from ELD, if found. + * [0]: progressive, [1]: interlaced + */ + int video_latency[2]; + /** + * @audio_latency: audio latency info from ELD, if found + * [0]: progressive, [1]: interlaced + */ + int audio_latency[2]; + + /** + * @ddc: associated ddc adapter. + * A connector usually has its associated ddc adapter. 
If a driver uses + * this field, then an appropriate symbolic link is created in connector + * sysfs directory to make it easy for the user to tell which i2c + * adapter is for a particular display. + * + * The field should be set by calling drm_connector_init_with_ddc(). + */ + struct i2c_adapter *ddc; + + /** + * @null_edid_counter: track sinks that give us all zeros for the EDID. + * Needed to workaround some HW bugs where we get all 0s + */ + int null_edid_counter; + + /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */ + unsigned bad_edid_counter; + + /** + * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used + * in Displayport compliance testing - Displayport Link CTS Core 1.2 + * rev1.1 4.2.2.6 + */ + bool edid_corrupt; + /** + * @real_edid_checksum: real edid checksum for corrupted edid block. + * Required in Displayport 1.4 compliance testing + * rev1.1 4.2.2.6 + */ + u8 real_edid_checksum; + + /** @debugfs_entry: debugfs directory for this connector */ + struct dentry *debugfs_entry; + + /** + * @state: + * + * Current atomic state for this connector. + * + * This is protected by &drm_mode_config.connection_mutex. Note that + * nonblocking atomic commits access the current connector state without + * taking locks. Either by going through the &struct drm_atomic_state + * pointers, see for_each_oldnew_connector_in_state(), + * for_each_old_connector_in_state() and + * for_each_new_connector_in_state(). Or through careful ordering of + * atomic commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. + */ + struct drm_connector_state *state; + + /* DisplayID bits. FIXME: Extract into a substruct? */ + + /** + * @tile_blob_ptr: + * + * DRM blob property data for the tile property (used mostly by DP MST). + * This is meant for screens which are driven through separate display + * pipelines represented by &drm_crtc, which might not be running with + * genlocked clocks. For tiled panels which are genlocked, like + * dual-link LVDS or dual-link DSI, the driver should try to not expose + * the tiling and virtualize both &drm_crtc and &drm_plane if needed. + * + * This should only be updated by calling + * drm_connector_set_tile_property(). + */ + struct drm_property_blob *tile_blob_ptr; + + /** @has_tile: is this connector connected to a tiled monitor */ + bool has_tile; + /** @tile_group: tile group for the connected monitor */ + struct drm_tile_group *tile_group; + /** @tile_is_single_monitor: whether the tile is one monitor housing */ + bool tile_is_single_monitor; + + /** @num_h_tile: number of horizontal tiles in the tile group */ + /** @num_v_tile: number of vertical tiles in the tile group */ + uint8_t num_h_tile, num_v_tile; + /** @tile_h_loc: horizontal location of this tile */ + /** @tile_v_loc: vertical location of this tile */ + uint8_t tile_h_loc, tile_v_loc; + /** @tile_h_size: horizontal size of this tile. */ + /** @tile_v_size: vertical size of this tile. */ + uint16_t tile_h_size, tile_v_size; + + /** + * @free_node: + * + * List used only by &drm_connector_list_iter to be able to clean up a + * connector from any context, in conjunction with + * &drm_mode_config.connector_free_work. 
+ */ + struct llist_node free_node; + + /** @hdr_sink_metadata: HDR Metadata Information read from sink */ + struct hdr_sink_metadata hdr_sink_metadata; +}; + +#define obj_to_connector(x) container_of(x, struct drm_connector, base) + +int drm_connector_init(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type); +int drm_connector_init_with_ddc(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type, + struct i2c_adapter *ddc); +int drmm_connector_init(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type, + struct i2c_adapter *ddc); +void drm_connector_attach_edid_property(struct drm_connector *connector); +int drm_connector_register(struct drm_connector *connector); +void drm_connector_unregister(struct drm_connector *connector); +int drm_connector_attach_encoder(struct drm_connector *connector, + struct drm_encoder *encoder); + +void drm_connector_cleanup(struct drm_connector *connector); + +static inline unsigned int drm_connector_index(const struct drm_connector *connector) +{ + return connector->index; +} + +static inline u32 drm_connector_mask(const struct drm_connector *connector) +{ + return 1 << connector->index; +} + +/** + * drm_connector_lookup - lookup connector object + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: connector object id + * + * This function looks up the connector object specified by id + * add takes a reference to it. + */ +static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CONNECTOR); + return mo ? obj_to_connector(mo) : NULL; +} + +/** + * drm_connector_get - acquire a connector reference + * @connector: DRM connector + * + * This function increments the connector's refcount. + */ +static inline void drm_connector_get(struct drm_connector *connector) +{ + drm_mode_object_get(&connector->base); +} + +/** + * drm_connector_put - release a connector reference + * @connector: DRM connector + * + * This function decrements the connector's reference count and frees the + * object if the reference count drops to zero. + */ +static inline void drm_connector_put(struct drm_connector *connector) +{ + drm_mode_object_put(&connector->base); +} + +/** + * drm_connector_is_unregistered - has the connector been unregistered from + * userspace? + * @connector: DRM connector + * + * Checks whether or not @connector has been unregistered from userspace. + * + * Returns: + * True if the connector was unregistered, false if the connector is + * registered or has not yet been registered with userspace. 
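+ *
+ * A minimal usage sketch, e.g. to skip sysfs/debugfs work on a connector
+ * that is already being torn down::
+ *
+ *        if (drm_connector_is_unregistered(connector))
+ *                return;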
+ */ +static inline bool +drm_connector_is_unregistered(struct drm_connector *connector) +{ + return READ_ONCE(connector->registration_state) == + DRM_CONNECTOR_UNREGISTERED; +} + +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode); +const char *drm_get_connector_type_name(unsigned int connector_type); +const char *drm_get_connector_status_name(enum drm_connector_status status); +const char *drm_get_subpixel_order_name(enum subpixel_order order); +const char *drm_get_dpms_name(int val); +const char *drm_get_dvi_i_subconnector_name(int val); +const char *drm_get_dvi_i_select_name(int val); +const char *drm_get_tv_subconnector_name(int val); +const char *drm_get_tv_select_name(int val); +const char *drm_get_dp_subconnector_name(int val); +const char *drm_get_content_protection_name(int val); +const char *drm_get_hdcp_content_type_name(int val); + +int drm_mode_create_dvi_i_properties(struct drm_device *dev); +void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector); + +int drm_mode_create_tv_margin_properties(struct drm_device *dev); +int drm_mode_create_tv_properties(struct drm_device *dev, + unsigned int num_modes, + const char * const modes[]); +void drm_connector_attach_tv_margin_properties(struct drm_connector *conn); +int drm_mode_create_scaling_mode_property(struct drm_device *dev); +int drm_connector_attach_content_type_property(struct drm_connector *dev); +int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, + u32 scaling_mode_mask); +int drm_connector_attach_vrr_capable_property( + struct drm_connector *connector); +int drm_connector_attach_colorspace_property(struct drm_connector *connector); +int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector); +bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state, + struct drm_connector_state *new_state); +int drm_mode_create_aspect_ratio_property(struct drm_device *dev); +int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector); +int drm_mode_create_dp_colorspace_property(struct drm_connector *connector); +int drm_mode_create_content_type_property(struct drm_device *dev); +int drm_mode_create_suggested_offset_properties(struct drm_device *dev); + +int drm_connector_set_path_property(struct drm_connector *connector, + const char *path); +int drm_connector_set_tile_property(struct drm_connector *connector); +int drm_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid); +void drm_connector_set_link_status_property(struct drm_connector *connector, + uint64_t link_status); +void drm_connector_set_vrr_capable_property( + struct drm_connector *connector, bool capable); +int drm_connector_set_panel_orientation( + struct drm_connector *connector, + enum drm_panel_orientation panel_orientation); +int drm_connector_set_panel_orientation_with_quirk( + struct drm_connector *connector, + enum drm_panel_orientation panel_orientation, + int width, int height); +int drm_connector_set_orientation_from_panel( + struct drm_connector *connector, + struct drm_panel *panel); +int drm_connector_attach_max_bpc_property(struct drm_connector *connector, + int min, int max); +void drm_connector_create_privacy_screen_properties(struct drm_connector *conn); +void drm_connector_attach_privacy_screen_properties(struct drm_connector *conn); +void drm_connector_attach_privacy_screen_provider( + struct drm_connector *connector, struct drm_privacy_screen *priv); +void 
drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state); + +/** + * struct drm_tile_group - Tile group metadata + * @refcount: reference count + * @dev: DRM device + * @id: tile group id exposed to userspace + * @group_data: Sink-private data identifying this group + * + * @group_data corresponds to displayid vend/prod/serial for external screens + * with an EDID. + */ +struct drm_tile_group { + struct kref refcount; + struct drm_device *dev; + int id; + u8 group_data[8]; +}; + +struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, + const char topology[8]); +struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, + const char topology[8]); +void drm_mode_put_tile_group(struct drm_device *dev, + struct drm_tile_group *tg); + +/** + * struct drm_connector_list_iter - connector_list iterator + * + * This iterator tracks state needed to be able to walk the connector_list + * within struct drm_mode_config. Only use together with + * drm_connector_list_iter_begin(), drm_connector_list_iter_end() and + * drm_connector_list_iter_next() respectively the convenience macro + * drm_for_each_connector_iter(). + * + * Note that the return value of drm_connector_list_iter_next() is only valid + * up to the next drm_connector_list_iter_next() or + * drm_connector_list_iter_end() call. If you want to use the connector later, + * then you need to grab your own reference first using drm_connector_get(). + */ +struct drm_connector_list_iter { +/* private: */ + struct drm_device *dev; + struct drm_connector *conn; +}; + +void drm_connector_list_iter_begin(struct drm_device *dev, + struct drm_connector_list_iter *iter); +struct drm_connector * +drm_connector_list_iter_next(struct drm_connector_list_iter *iter); +void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); + +bool drm_connector_has_possible_encoder(struct drm_connector *connector, + struct drm_encoder *encoder); + +/** + * drm_for_each_connector_iter - connector_list iterator macro + * @connector: &struct drm_connector pointer used as cursor + * @iter: &struct drm_connector_list_iter + * + * Note that @connector is only valid within the list body, if you want to use + * @connector after calling drm_connector_list_iter_end() then you need to grab + * your own reference first using drm_connector_get(). 
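+ *
+ * A typical walk over the connector list looks roughly like this, where
+ * dev is the &struct drm_device and the loop body is only an example::
+ *
+ *        struct drm_connector_list_iter iter;
+ *        struct drm_connector *connector;
+ *        unsigned int connected = 0;
+ *
+ *        drm_connector_list_iter_begin(dev, &iter);
+ *        drm_for_each_connector_iter(connector, &iter)
+ *                connected += connector->status == connector_status_connected;
+ *        drm_connector_list_iter_end(&iter);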
+ */ +#define drm_for_each_connector_iter(connector, iter) \ + while ((connector = drm_connector_list_iter_next(iter))) + +/** + * drm_connector_for_each_possible_encoder - iterate connector's possible encoders + * @connector: &struct drm_connector pointer + * @encoder: &struct drm_encoder pointer used as cursor + */ +#define drm_connector_for_each_possible_encoder(connector, encoder) \ + drm_for_each_encoder_mask(encoder, (connector)->dev, \ + (connector)->possible_encoders) + +#endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h new file mode 100644 index 000000000..8e1cbc751 --- /dev/null +++ b/include/drm/drm_crtc.h @@ -0,0 +1,1332 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __DRM_CRTC_H__ +#define __DRM_CRTC_H__ + +#include <linux/spinlock.h> +#include <linux/types.h> +#include <drm/drm_modeset_lock.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_modes.h> +#include <drm/drm_device.h> +#include <drm/drm_plane.h> +#include <drm/drm_debugfs_crc.h> +#include <drm/drm_mode_config.h> + +struct drm_connector; +struct drm_device; +struct drm_framebuffer; +struct drm_mode_set; +struct drm_file; +struct drm_printer; +struct drm_self_refresh_data; +struct device_node; +struct edid; + +static inline int64_t U642I64(uint64_t val) +{ + return (int64_t)*((int64_t *)&val); +} +static inline uint64_t I642U64(int64_t val) +{ + return (uint64_t)*((uint64_t *)&val); +} + +struct drm_crtc; +struct drm_pending_vblank_event; +struct drm_plane; +struct drm_bridge; +struct drm_atomic_state; + +struct drm_crtc_helper_funcs; +struct drm_plane_helper_funcs; + +/** + * struct drm_crtc_state - mutable CRTC state + * + * Note that the distinction between @enable and @active is rather subtle: + * Flipping @active while @enable is set without changing anything else may + * never return in a failure from the &drm_mode_config_funcs.atomic_check + * callback. Userspace assumes that a DPMS On will always succeed. In other + * words: @enable controls resource assignment, @active controls the actual + * hardware state. + * + * The three booleans active_changed, connectors_changed and mode_changed are + * intended to indicate whether a full modeset is needed, rather than strictly + * describing what has changed in a commit. 
See also: + * drm_atomic_crtc_needs_modeset() + * + * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or + * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control + * state like @plane_mask so drivers not converted over to atomic helpers should + * not rely on these being accurate! + */ +struct drm_crtc_state { + /** @crtc: backpointer to the CRTC */ + struct drm_crtc *crtc; + + /** + * @enable: Whether the CRTC should be enabled, gates all other state. + * This controls reservations of shared resources. Actual hardware state + * is controlled by @active. + */ + bool enable; + + /** + * @active: Whether the CRTC is actively displaying (used for DPMS). + * Implies that @enable is set. The driver must not release any shared + * resources if @active is set to false but @enable still true, because + * userspace expects that a DPMS ON always succeeds. + * + * Hence drivers must not consult @active in their various + * &drm_mode_config_funcs.atomic_check callback to reject an atomic + * commit. They can consult it to aid in the computation of derived + * hardware state, since even in the DPMS OFF state the display hardware + * should be as much powered down as when the CRTC is completely + * disabled through setting @enable to false. + */ + bool active; + + /** + * @planes_changed: Planes on this crtc are updated. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. + */ + bool planes_changed : 1; + + /** + * @mode_changed: @mode or @enable has been changed. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + * + * Drivers are supposed to set this for any CRTC state changes that + * require a full modeset. They can also reset it to false if e.g. a + * @mode change can be done without a full modeset by only changing + * scaler settings. + */ + bool mode_changed : 1; + + /** + * @active_changed: @active has been toggled. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + */ + bool active_changed : 1; + + /** + * @connectors_changed: Connectors to this crtc have been updated, + * either in their state or routing. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + * + * Drivers are supposed to set this as-needed from their own atomic + * check code, e.g. from &drm_encoder_helper_funcs.atomic_check + */ + bool connectors_changed : 1; + /** + * @zpos_changed: zpos values of planes on this crtc have been updated. + * Used by the atomic helpers and drivers to steer the atomic commit + * control flow. + */ + bool zpos_changed : 1; + /** + * @color_mgmt_changed: Color management properties have changed + * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and + * drivers to steer the atomic commit control flow. + */ + bool color_mgmt_changed : 1; + + /** + * @no_vblank: + * + * Reflects the ability of a CRTC to send VBLANK events. This state + * usually depends on the pipeline configuration. If set to true, DRM + * atomic helpers will send out a fake VBLANK event during display + * updates after all hardware changes have been committed. This is + * implemented in drm_atomic_helper_fake_vblank(). + * + * One usage is for drivers and/or hardware without support for VBLANK + * interrupts. 
Such drivers typically do not initialize vblanking + * (i.e., call drm_vblank_init() with the number of CRTCs). For CRTCs + * without initialized vblanking, this field is set to true in + * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be + * send out on each update of the display pipeline by + * drm_atomic_helper_fake_vblank(). + * + * Another usage is CRTCs feeding a writeback connector operating in + * oneshot mode. In this case the fake VBLANK event is only generated + * when a job is queued to the writeback connector, and we want the + * core to fake VBLANK events when this part of the pipeline hasn't + * changed but others had or when the CRTC and connectors are being + * disabled. + * + * __drm_atomic_helper_crtc_duplicate_state() will not reset the value + * from the current state, the CRTC driver is then responsible for + * updating this field when needed. + * + * Note that the combination of &drm_crtc_state.event == NULL and + * &drm_crtc_state.no_blank == true is valid and usually used when the + * writeback connector attached to the CRTC has a new job queued. In + * this case the driver will send the VBLANK event on its own when the + * writeback job is complete. + */ + bool no_vblank : 1; + + /** + * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to + * this CRTC. + */ + u32 plane_mask; + + /** + * @connector_mask: Bitmask of drm_connector_mask(connector) of + * connectors attached to this CRTC. + */ + u32 connector_mask; + + /** + * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders + * attached to this CRTC. + */ + u32 encoder_mask; + + /** + * @adjusted_mode: + * + * Internal display timings which can be used by the driver to handle + * differences between the mode requested by userspace in @mode and what + * is actually programmed into the hardware. + * + * For drivers using &drm_bridge, this stores hardware display timings + * used between the CRTC and the first bridge. For other drivers, the + * meaning of the adjusted_mode field is purely driver implementation + * defined information, and will usually be used to store the hardware + * display timings used between the CRTC and encoder blocks. + */ + struct drm_display_mode adjusted_mode; + + /** + * @mode: + * + * Display timings requested by userspace. The driver should try to + * match the refresh rate as close as possible (but note that it's + * undefined what exactly is close enough, e.g. some of the HDMI modes + * only differ in less than 1% of the refresh rate). The active width + * and height as observed by userspace for positioning planes must match + * exactly. + * + * For external connectors where the sink isn't fixed (like with a + * built-in panel), this mode here should match the physical mode on the + * wire to the last details (i.e. including sync polarities and + * everything). + */ + struct drm_display_mode mode; + + /** + * @mode_blob: &drm_property_blob for @mode, for exposing the mode to + * atomic userspace. + */ + struct drm_property_blob *mode_blob; + + /** + * @degamma_lut: + * + * Lookup table for converting framebuffer pixel data before apply the + * color conversion matrix @ctm. See drm_crtc_enable_color_mgmt(). The + * blob (if not NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *degamma_lut; + + /** + * @ctm: + * + * Color transformation matrix. See drm_crtc_enable_color_mgmt(). The + * blob (if not NULL) is a &struct drm_color_ctm. 
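+ *
+ * For reference, an identity matrix expressed in the blob's S31.32
+ * fixed-point layout would look like this::
+ *
+ *        struct drm_color_ctm ctm = {
+ *                .matrix = {
+ *                        1ULL << 32, 0, 0,
+ *                        0, 1ULL << 32, 0,
+ *                        0, 0, 1ULL << 32,
+ *                },
+ *        };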
+ */ + struct drm_property_blob *ctm; + + /** + * @gamma_lut: + * + * Lookup table for converting pixel data after the color conversion + * matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not + * NULL) is an array of &struct drm_color_lut. + * + * Note that for mostly historical reasons stemming from Xorg heritage, + * this is also used to store the color map (also sometimes color lut, + * CLUT or color palette) for indexed formats like DRM_FORMAT_C8. + */ + struct drm_property_blob *gamma_lut; + + /** + * @target_vblank: + * + * Target vertical blank period when a page flip + * should take effect. + */ + u32 target_vblank; + + /** + * @async_flip: + * + * This is set when DRM_MODE_PAGE_FLIP_ASYNC is set in the legacy + * PAGE_FLIP IOCTL. It's not wired up for the atomic IOCTL itself yet. + */ + bool async_flip; + + /** + * @vrr_enabled: + * + * Indicates if variable refresh rate should be enabled for the CRTC. + * Support for the requested vrr state will depend on driver and + * hardware capabiltiy - lacking support is not treated as failure. + */ + bool vrr_enabled; + + /** + * @self_refresh_active: + * + * Used by the self refresh helpers to denote when a self refresh + * transition is occurring. This will be set on enable/disable callbacks + * when self refresh is being enabled or disabled. In some cases, it may + * not be desirable to fully shut off the crtc during self refresh. + * CRTC's can inspect this flag and determine the best course of action. + */ + bool self_refresh_active; + + /** + * @scaling_filter: + * + * Scaling filter to be applied + */ + enum drm_scaling_filter scaling_filter; + + /** + * @event: + * + * Optional pointer to a DRM event to signal upon completion of the + * state update. The driver must send out the event when the atomic + * commit operation completes. There are two cases: + * + * - The event is for a CRTC which is being disabled through this + * atomic commit. In that case the event can be send out any time + * after the hardware has stopped scanning out the current + * framebuffers. It should contain the timestamp and counter for the + * last vblank before the display pipeline was shut off. The simplest + * way to achieve that is calling drm_crtc_send_vblank_event() + * somewhen after drm_crtc_vblank_off() has been called. + * + * - For a CRTC which is enabled at the end of the commit (even when it + * undergoes an full modeset) the vblank timestamp and counter must + * be for the vblank right before the first frame that scans out the + * new set of buffers. Again the event can only be sent out after the + * hardware has stopped scanning out the old buffers. + * + * - Events for disabled CRTCs are not allowed, and drivers can ignore + * that case. + * + * For very simple hardware without VBLANK interrupt, enabling + * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers + * send a fake VBLANK event at the end of the display update after all + * hardware changes have been applied. See + * drm_atomic_helper_fake_vblank(). + * + * For more complex hardware this + * can be handled by the drm_crtc_send_vblank_event() function, + * which the driver should call on the provided event upon completion of + * the atomic commit. Note that if the driver supports vblank signalling + * and timestamping the vblank counters and timestamps must agree with + * the ones returned from page flip events. 
With the current vblank + * helper infrastructure this can be achieved by holding a vblank + * reference while the page flip is pending, acquired through + * drm_crtc_vblank_get() and released with drm_crtc_vblank_put(). + * Drivers are free to implement their own vblank counter and timestamp + * tracking though, e.g. if they have accurate timestamp registers in + * hardware. + * + * For hardware which supports some means to synchronize vblank + * interrupt delivery with committing display state there's also + * drm_crtc_arm_vblank_event(). See the documentation of that function + * for a detailed discussion of the constraints it needs to be used + * safely. + * + * If the device can't notify of flip completion in a race-free way + * at all, then the event should be armed just after the page flip is + * committed. In the worst case the driver will send the event to + * userspace one frame too late. This doesn't allow for a real atomic + * update, but it should avoid tearing. + */ + struct drm_pending_vblank_event *event; + + /** + * @commit: + * + * This tracks how the commit for this update proceeds through the + * various phases. This is never cleared, except when we destroy the + * state, so that subsequent commits can synchronize with previous ones. + */ + struct drm_crtc_commit *commit; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; +}; + +/** + * struct drm_crtc_funcs - control CRTCs for a given device + * + * The drm_crtc_funcs structure is the central CRTC management structure + * in the DRM. Each CRTC controls one or more connectors (note that the name + * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc. + * connectors, not just CRTs). + * + * Each driver is responsible for filling out this structure at startup time, + * in addition to providing other modesetting features, like i2c and DDC + * bus accessors. + */ +struct drm_crtc_funcs { + /** + * @reset: + * + * Reset CRTC hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_crtc *crtc); + + /** + * @cursor_set: + * + * Update the cursor image. The cursor position is relative to the CRTC + * and can be partially or fully outside of the visible area. + * + * Note that contrary to all other KMS functions the legacy cursor entry + * points don't take a framebuffer object, but instead take directly a + * raw buffer object id from the driver's buffer manager (which is + * either GEM or TTM for current drivers). + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height); + + /** + * @cursor_set2: + * + * Update the cursor image, including hotspot information. The hotspot + * must not affect the cursor position in CRTC coordinates, but is only + * meant as a hint for virtualized display hardware to coordinate the + * guests and hosts cursor position. The cursor hotspot is relative to + * the cursor image. Otherwise this works exactly like @cursor_set. 
+ * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height, + int32_t hot_x, int32_t hot_y); + + /** + * @cursor_move: + * + * Update the cursor position. The cursor does not need to be visible + * when this hook is called. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_move)(struct drm_crtc *crtc, int x, int y); + + /** + * @gamma_set: + * + * Set gamma on the CRTC. + * + * This callback is optional. + * + * Atomic drivers who want to support gamma tables should implement the + * atomic color management support, enabled by calling + * drm_crtc_enable_color_mgmt(), which then supports the legacy gamma + * interface through the drm_atomic_helper_legacy_gamma_set() + * compatibility implementation. + */ + int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t size, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @destroy: + * + * Clean up CRTC resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged + * in DRM. + */ + void (*destroy)(struct drm_crtc *crtc); + + /** + * @set_config: + * + * This is the main legacy entry point to change the modeset state on a + * CRTC. All the details of the desired configuration are passed in a + * &struct drm_mode_set - see there for details. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_set_config() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_config)(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @page_flip: + * + * Legacy entry point to schedule a flip to the given framebuffer. + * + * Page flipping is a synchronization mechanism that replaces the frame + * buffer being scanned out by the CRTC with a new frame buffer during + * vertical blanking, avoiding tearing (except when requested otherwise + * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application + * requests a page flip the DRM core verifies that the new frame buffer + * is large enough to be scanned out by the CRTC in the currently + * configured mode and then calls this hook with a pointer to the new + * frame buffer. + * + * The driver must wait for any pending rendering to the new framebuffer + * to complete before executing the flip. It should also wait for any + * pending rendering from other drivers if the underlying buffer is a + * shared dma-buf. + * + * An application can request to be notified when the page flip has + * completed. The drm core will supply a &struct drm_event in the event + * parameter in this case. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the flip. Note that if + * the driver supports vblank signalling and timestamping the vblank + * counters and timestamps must agree with the ones returned from page + * flip events. 
With the current vblank helper infrastructure this can + * be achieved by holding a vblank reference while the page flip is + * pending, acquired through drm_crtc_vblank_get() and released with + * drm_crtc_vblank_put(). Drivers are free to implement their own vblank + * counter and timestamp tracking though, e.g. if they have accurate + * timestamp registers in hardware. + * + * This callback is optional. + * + * NOTE: + * + * Very early versions of the KMS ABI mandated that the driver must + * block (but not reject) any rendering to the old framebuffer until the + * flip operation has completed and the old framebuffer is no longer + * visible. This requirement has been lifted, and userspace is instead + * expected to request delivery of an event and wait with recycling old + * buffers until such has been received. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. Note that if a + * page flip operation is already pending the callback should return + * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode + * or just runtime disabled through DPMS respectively the new atomic + * "ACTIVE" state) should result in an -EINVAL error code. Note that + * drm_atomic_helper_page_flip() checks this already for atomic drivers. + */ + int (*page_flip)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @page_flip_target: + * + * Same as @page_flip but with an additional parameter specifying the + * absolute target vertical blank period (as reported by + * drm_crtc_vblank_count()) when the flip should take effect. + * + * Note that the core code calls drm_crtc_vblank_get before this entry + * point, and will call drm_crtc_vblank_put if this entry point returns + * any non-0 error code. It's the driver's responsibility to call + * drm_crtc_vblank_put after this entry point returns 0, typically when + * the flip completes. + */ + int (*page_flip_target)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, uint32_t target, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * CRTC. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_crtc *crtc, + struct drm_property *property, uint64_t val); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this CRTC and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_crtc_state should use + * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. 
+ * + * It is an error to call this hook before &drm_crtc.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. + */ + void (*atomic_destroy_state)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_crtc_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which should never happen, the core only + * asks for properties attached to this CRTC). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ + int (*atomic_set_property)(struct drm_crtc *crtc, + struct drm_crtc_state *state, + struct drm_property *property, + uint64_t val); + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCRTC IOCTL. + * + * Do not call this function directly, use + * drm_atomic_crtc_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this CRTC). 
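As a concrete illustration of the subclassing pattern described for @atomic_duplicate_state and @atomic_destroy_state above, a driver that extends &struct drm_crtc_state might implement the two hooks roughly as sketched below. This is only a sketch, not part of the header: struct foo_crtc_state and its sync_mode member are hypothetical, and the __drm_atomic_helper_crtc_*_state() helpers come from <drm/drm_atomic_state_helper.h>.

struct foo_crtc_state {
        struct drm_crtc_state base;
        u32 sync_mode;          /* hypothetical driver-private member */
};

#define to_foo_crtc_state(s) container_of(s, struct foo_crtc_state, base)

static struct drm_crtc_state *foo_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
        struct foo_crtc_state *new;

        if (WARN_ON(!crtc->state))
                return NULL;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        /* Copies the shared drm_crtc_state members in a consistent fashion. */
        __drm_atomic_helper_crtc_duplicate_state(crtc, &new->base);
        new->sync_mode = to_foo_crtc_state(crtc->state)->sync_mode;

        return &new->base;
}

static void foo_crtc_atomic_destroy_state(struct drm_crtc *crtc,
                                          struct drm_crtc_state *state)
{
        /* Releases the references taken by the duplicated base state. */
        __drm_atomic_helper_crtc_destroy_state(state);
        kfree(to_foo_crtc_state(state));
}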
+ */ + int (*atomic_get_property)(struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, + uint64_t *val); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the crtc like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_crtc *crtc); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the crtc from + * @late_register. It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + */ + void (*early_unregister)(struct drm_crtc *crtc); + + /** + * @set_crc_source: + * + * Changes the source of CRC checksums of frames at the request of + * userspace, typically for testing purposes. The sources available are + * specific of each driver and a %NULL value indicates that CRC + * generation is to be switched off. + * + * When CRC generation is enabled, the driver should call + * drm_crtc_add_crc_entry() at each frame, providing any information + * that characterizes the frame contents in the crcN arguments, as + * provided from the configured source. Drivers must accept an "auto" + * source name that will select a default source for this CRTC. + * + * This may trigger an atomic modeset commit if necessary, to enable CRC + * generation. + * + * Note that "auto" can depend upon the current modeset configuration, + * e.g. it could pick an encoder or output specific CRC sampling point. + * + * This callback is optional if the driver does not support any CRC + * generation functionality. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_crc_source)(struct drm_crtc *crtc, const char *source); + + /** + * @verify_crc_source: + * + * verifies the source of CRC checksums of frames before setting the + * source for CRC and during crc open. Source parameter can be NULL + * while disabling crc source. + * + * This callback is optional if the driver does not support any CRC + * generation functionality. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*verify_crc_source)(struct drm_crtc *crtc, const char *source, + size_t *values_cnt); + /** + * @get_crc_sources: + * + * Driver callback for getting a list of all the available sources for + * CRC generation. This callback depends upon verify_crc_source, So + * verify_crc_source callback should be implemented before implementing + * this. Driver can pass full list of available crc sources, this + * callback does the verification on each crc-source before passing it + * to userspace. + * + * This callback is optional if the driver does not support exporting of + * possible CRC sources list. + * + * RETURNS: + * + * a constant character pointer to the list of all the available CRC + * sources. On failure driver should return NULL. count should be + * updated with number of sources in list. if zero we don't process any + * source from the list. 
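To make the CRC hooks above more tangible, here is a minimal sketch of @verify_crc_source and @get_crc_sources for a driver exposing a single pipe CRC plus the mandatory "auto" source. The foo_* names are hypothetical.

static const char * const foo_crc_sources[] = { "auto", "pipe" };

static int foo_crtc_verify_crc_source(struct drm_crtc *crtc,
                                      const char *source, size_t *values_cnt)
{
        unsigned int i;

        /* A NULL source switches CRC generation off and is always valid. */
        if (!source)
                return 0;

        for (i = 0; i < ARRAY_SIZE(foo_crc_sources); i++) {
                if (!strcmp(source, foo_crc_sources[i])) {
                        *values_cnt = 1;        /* one CRC value per frame */
                        return 0;
                }
        }

        return -EINVAL;
}

static const char * const *foo_crtc_get_crc_sources(struct drm_crtc *crtc,
                                                    size_t *count)
{
        *count = ARRAY_SIZE(foo_crc_sources);
        return foo_crc_sources;
}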
+ */ + const char *const *(*get_crc_sources)(struct drm_crtc *crtc, + size_t *count); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_crtc_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_crtc_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_crtc_state *state); + + /** + * @get_vblank_counter: + * + * Driver callback for fetching a raw hardware vblank counter for the + * CRTC. It's meant to be used by new drivers as the replacement of + * &drm_driver.get_vblank_counter hook. + * + * This callback is optional. If a device doesn't have a hardware + * counter, the driver can simply leave the hook as NULL. The DRM core + * will account for missed vblank events while interrupts where disabled + * based on system timestamps. + * + * Wraparound handling and loss of events due to modesetting is dealt + * with in the DRM core code, as long as drivers call + * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or + * enabling a CRTC. + * + * See also &drm_device.vblank_disable_immediate and + * &drm_device.max_vblank_count. + * + * Returns: + * + * Raw vblank counter value. + */ + u32 (*get_vblank_counter)(struct drm_crtc *crtc); + + /** + * @enable_vblank: + * + * Enable vblank interrupts for the CRTC. It's meant to be used by + * new drivers as the replacement of &drm_driver.enable_vblank hook. + * + * Returns: + * + * Zero on success, appropriate errno if the vblank interrupt cannot + * be enabled. + */ + int (*enable_vblank)(struct drm_crtc *crtc); + + /** + * @disable_vblank: + * + * Disable vblank interrupts for the CRTC. It's meant to be used by + * new drivers as the replacement of &drm_driver.disable_vblank hook. + */ + void (*disable_vblank)(struct drm_crtc *crtc); + + /** + * @get_vblank_timestamp: + * + * Called by drm_get_last_vbltimestamp(). Should return a precise + * timestamp when the most recent vblank interval ended or will end. + * + * Specifically, the timestamp in @vblank_time should correspond as + * closely as possible to the time when the first video scanline of + * the video frame after the end of vblank will start scanning out, + * the time immediately after end of the vblank interval. If the + * @crtc is currently inside vblank, this will be a time in the future. + * If the @crtc is currently scanning out a frame, this will be the + * past start time of the current scanout. This is meant to adhere + * to the OpenML OML_sync_control extension specification. + * + * Parameters: + * + * crtc: + * CRTC for which timestamp should be returned. + * max_error: + * Maximum allowable timestamp error in nanoseconds. + * Implementation should strive to provide timestamp + * with an error of at most max_error nanoseconds. + * Returns true upper bound on error for timestamp. + * vblank_time: + * Target location for returned vblank timestamp. + * in_vblank_irq: + * True when called from drm_crtc_handle_vblank(). Some drivers + * need to apply some workarounds for gpu-specific vblank irq quirks + * if flag is set. + * + * Returns: + * + * True on success, false on failure, which means the core should + * fallback to a simple timestamp taken in drm_crtc_handle_vblank(). 
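Tying the hooks above together, an atomic driver typically fills in a single &struct drm_crtc_funcs table and delegates the legacy entry points to the atomic helpers. A sketch, assuming hypothetical foo_crtc_* implementations such as the ones above (the drm_atomic_helper_* and drm_crtc_vblank_helper_* callbacks are from the corresponding helper headers):

static const struct drm_crtc_funcs foo_crtc_funcs = {
        .reset                  = drm_atomic_helper_crtc_reset,
        /* no .destroy: cleanup is handled when the CRTC is allocated with
         * the managed drmm_crtc_alloc_with_planes() helper (see below) */
        .set_config             = drm_atomic_helper_set_config,
        .page_flip              = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = foo_crtc_atomic_duplicate_state,
        .atomic_destroy_state   = foo_crtc_atomic_destroy_state,
        .set_crc_source         = foo_crtc_set_crc_source,      /* hypothetical */
        .verify_crc_source      = foo_crtc_verify_crc_source,
        .get_crc_sources        = foo_crtc_get_crc_sources,
        .enable_vblank          = foo_crtc_enable_vblank,       /* hypothetical */
        .disable_vblank         = foo_crtc_disable_vblank,      /* hypothetical */
        .get_vblank_timestamp   = drm_crtc_vblank_helper_get_vblank_timestamp,
};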
+ */ + bool (*get_vblank_timestamp)(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); +}; + +/** + * struct drm_crtc - central CRTC control structure + * + * Each CRTC may have one or more connectors associated with it. This structure + * allows the CRTC to be controlled. + */ +struct drm_crtc { + /** @dev: parent DRM device */ + struct drm_device *dev; + /** @port: OF node used by drm_of_find_possible_crtcs(). */ + struct device_node *port; + /** + * @head: + * + * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list. + * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ + struct list_head head; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: + * + * This provides a read lock for the overall CRTC state (mode, dpms + * state, ...) and a write lock for everything which can be update + * without a full modeset (fb, cursor data, CRTC properties ...). A full + * modeset also need to grab &drm_mode_config.connection_mutex. + * + * For atomic drivers specifically this protects @state. + */ + struct drm_modeset_lock mutex; + + /** @base: base KMS object for ID tracking etc. */ + struct drm_mode_object base; + + /** + * @primary: + * Primary plane for this CRTC. Note that this is only + * relevant for legacy IOCTL, it specifies the plane implicitly used by + * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance + * beyond that. + */ + struct drm_plane *primary; + + /** + * @cursor: + * Cursor plane for this CRTC. Note that this is only relevant for + * legacy IOCTL, it specifies the plane implicitly used by the SETCURSOR + * and SETCURSOR2 IOCTLs. It does not have any significance + * beyond that. + */ + struct drm_plane *cursor; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the CRTC. + */ + unsigned index; + + /** + * @cursor_x: Current x position of the cursor, used for universal + * cursor planes because the SETCURSOR IOCTL only can update the + * framebuffer without supplying the coordinates. Drivers should not use + * this directly, atomic drivers should look at &drm_plane_state.crtc_x + * of the cursor plane instead. + */ + int cursor_x; + /** + * @cursor_y: Current y position of the cursor, used for universal + * cursor planes because the SETCURSOR IOCTL only can update the + * framebuffer without supplying the coordinates. Drivers should not use + * this directly, atomic drivers should look at &drm_plane_state.crtc_y + * of the cursor plane instead. + */ + int cursor_y; + + /** + * @enabled: + * + * Is this CRTC enabled? Should only be used by legacy drivers, atomic + * drivers should instead consult &drm_crtc_state.enable and + * &drm_crtc_state.active. Atomic drivers can update this by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + bool enabled; + + /** + * @mode: + * + * Current mode timings. Should only be used by legacy drivers, atomic + * drivers should instead consult &drm_crtc_state.mode. Atomic drivers + * can update this by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + struct drm_display_mode mode; + + /** + * @hwmode: + * + * Programmed mode in hw, after adjustments for encoders, crtc, panel + * scaling etc. Should only be used by legacy drivers, for high + * precision vblank timestamps in + * drm_crtc_vblank_helper_get_vblank_timestamp(). 
+ * + * Note that atomic drivers should not use this, but instead use + * &drm_crtc_state.adjusted_mode. And for high-precision timestamps + * drm_crtc_vblank_helper_get_vblank_timestamp() used + * &drm_vblank_crtc.hwmode, + * which is filled out by calling drm_calc_timestamping_constants(). + */ + struct drm_display_mode hwmode; + + /** + * @x: + * x position on screen. Should only be used by legacy drivers, atomic + * drivers should look at &drm_plane_state.crtc_x of the primary plane + * instead. Updated by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + int x; + /** + * @y: + * y position on screen. Should only be used by legacy drivers, atomic + * drivers should look at &drm_plane_state.crtc_y of the primary plane + * instead. Updated by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + int y; + + /** @funcs: CRTC control functions */ + const struct drm_crtc_funcs *funcs; + + /** + * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up + * by calling drm_mode_crtc_set_gamma_size(). + * + * Note that atomic drivers need to instead use + * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt(). + */ + uint32_t gamma_size; + + /** + * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and + * GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size(). + * + * Note that atomic drivers need to instead use + * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt(). + */ + uint16_t *gamma_store; + + /** @helper_private: mid-layer private data */ + const struct drm_crtc_helper_funcs *helper_private; + + /** @properties: property tracking for this CRTC */ + struct drm_object_properties properties; + + /** + * @scaling_filter_property: property to apply a particular filter while + * scaling. + */ + struct drm_property *scaling_filter_property; + + /** + * @state: + * + * Current atomic state for this CRTC. + * + * This is protected by @mutex. Note that nonblocking atomic commits + * access the current CRTC state without taking locks. Either by going + * through the &struct drm_atomic_state pointers, see + * for_each_oldnew_crtc_in_state(), for_each_old_crtc_in_state() and + * for_each_new_crtc_in_state(). Or through careful ordering of atomic + * commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. + */ + struct drm_crtc_state *state; + + /** + * @commit_list: + * + * List of &drm_crtc_commit structures tracking pending commits. + * Protected by @commit_lock. This list holds its own full reference, + * as does the ongoing commit. + * + * "Note that the commit for a state change is also tracked in + * &drm_crtc_state.commit. For accessing the immediately preceding + * commit in an atomic update it is recommended to just use that + * pointer in the old CRTC state, since accessing that doesn't need + * any locking or list-walking. @commit_list should only be used to + * stall for framebuffer cleanup that's signalled through + * &drm_crtc_commit.cleanup_done." + */ + struct list_head commit_list; + + /** + * @commit_lock: + * + * Spinlock to protect @commit_list. + */ + spinlock_t commit_lock; + + /** + * @debugfs_entry: + * + * Debugfs directory for this CRTC. + */ + struct dentry *debugfs_entry; + + /** + * @crc: + * + * Configuration settings of CRC capture. + */ + struct drm_crtc_crc crc; + + /** + * @fence_context: + * + * timeline context used for fence operations. 
+ */ + unsigned int fence_context; + + /** + * @fence_lock: + * + * spinlock to protect the fences in the fence_context. + */ + spinlock_t fence_lock; + /** + * @fence_seqno: + * + * Seqno variable used as monotonic counter for the fences + * created on the CRTC's timeline. + */ + unsigned long fence_seqno; + + /** + * @timeline_name: + * + * The name of the CRTC's fence timeline. + */ + char timeline_name[32]; + + /** + * @self_refresh_data: Holds the state for the self refresh helpers + * + * Initialized via drm_self_refresh_helper_init(). + */ + struct drm_self_refresh_data *self_refresh_data; +}; + +/** + * struct drm_mode_set - new values for a CRTC config change + * @fb: framebuffer to use for new config + * @crtc: CRTC whose configuration we're about to change + * @mode: mode timings to use + * @x: position of this CRTC relative to @fb + * @y: position of this CRTC relative to @fb + * @connectors: array of connectors to drive with this CRTC if possible + * @num_connectors: size of @connectors array + * + * This represents a modeset configuration for the legacy SETCRTC ioctl and is + * also used internally. Atomic drivers instead use &drm_atomic_state. + */ +struct drm_mode_set { + struct drm_framebuffer *fb; + struct drm_crtc *crtc; + struct drm_display_mode *mode; + + uint32_t x; + uint32_t y; + + struct drm_connector **connectors; + size_t num_connectors; +}; + +#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) + +__printf(6, 7) +int drm_crtc_init_with_planes(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...); + +__printf(6, 7) +int drmm_crtc_init_with_planes(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...); + +void drm_crtc_cleanup(struct drm_crtc *crtc); + +__printf(7, 8) +void *__drmm_crtc_alloc_with_planes(struct drm_device *dev, + size_t size, size_t offset, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...); + +/** + * drmm_crtc_alloc_with_planes - Allocate and initialize a new CRTC object with + * specified primary and cursor planes. + * @dev: DRM device + * @type: the type of the struct which contains struct &drm_crtc + * @member: the name of the &drm_crtc within @type. + * @primary: Primary plane for CRTC + * @cursor: Cursor plane for CRTC + * @funcs: callbacks for the new CRTC + * @name: printf style format string for the CRTC name, or NULL for default name + * + * Allocates and initializes a new crtc object. Cleanup is automatically + * handled through registering drmm_crtc_cleanup() with drmm_add_action(). + * + * The @drm_crtc_funcs.destroy hook must be NULL. + * + * Returns: + * Pointer to new crtc, or ERR_PTR on failure. + */ +#define drmm_crtc_alloc_with_planes(dev, type, member, primary, cursor, funcs, name, ...) \ + ((type *)__drmm_crtc_alloc_with_planes(dev, sizeof(type), \ + offsetof(type, member), \ + primary, cursor, funcs, \ + name, ##__VA_ARGS__)) + +/** + * drm_crtc_index - find the index of a registered CRTC + * @crtc: CRTC to find index for + * + * Given a registered CRTC, return the index of that CRTC within a DRM + * device's list of CRTCs. 
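A short usage sketch for drmm_crtc_alloc_with_planes(), assuming a hypothetical struct foo_crtc that embeds &struct drm_crtc; foo_crtc_funcs and foo_crtc_helper_funcs are driver tables, and drm_crtc_helper_add() comes from the modeset helper vtables:

struct foo_crtc {
        struct drm_crtc base;
        /* driver-private members ... */
};

static struct foo_crtc *foo_crtc_create(struct drm_device *drm,
                                        struct drm_plane *primary,
                                        struct drm_plane *cursor)
{
        struct foo_crtc *foo;

        /* Cleanup is managed automatically, so &foo_crtc_funcs.destroy is NULL. */
        foo = drmm_crtc_alloc_with_planes(drm, struct foo_crtc, base,
                                          primary, cursor, &foo_crtc_funcs,
                                          NULL);
        if (IS_ERR(foo))
                return foo;

        drm_crtc_helper_add(&foo->base, &foo_crtc_helper_funcs);

        return foo;
}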
+ */ +static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc) +{ + return crtc->index; +} + +/** + * drm_crtc_mask - find the mask of a registered CRTC + * @crtc: CRTC to find mask for + * + * Given a registered CRTC, return the mask bit of that CRTC for the + * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields. + */ +static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) +{ + return 1 << drm_crtc_index(crtc); +} + +int drm_mode_set_config_internal(struct drm_mode_set *set); +struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); + +/** + * drm_crtc_find - look up a CRTC object from its ID + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: &drm_mode_object ID + * + * This can be used to look up a CRTC from its userspace ID. Only used by + * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS + * userspace interface should be done using &drm_property. + */ +static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC); + return mo ? obj_to_crtc(mo) : NULL; +} + +/** + * drm_for_each_crtc - iterate over all CRTCs + * @crtc: a &struct drm_crtc as the loop cursor + * @dev: the &struct drm_device + * + * Iterate over all CRTCs of @dev. + */ +#define drm_for_each_crtc(crtc, dev) \ + list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) + +/** + * drm_for_each_crtc_reverse - iterate over all CRTCs in reverse order + * @crtc: a &struct drm_crtc as the loop cursor + * @dev: the &struct drm_device + * + * Iterate over all CRTCs of @dev. + */ +#define drm_for_each_crtc_reverse(crtc, dev) \ + list_for_each_entry_reverse(crtc, &(dev)->mode_config.crtc_list, head) + +int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc, + unsigned int supported_filters); + +#endif /* __DRM_CRTC_H__ */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h new file mode 100644 index 000000000..a6d520d5b --- /dev/null +++ b/include/drm/drm_crtc_helper.h @@ -0,0 +1,61 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * The DRM mode setting helper functions are common code for drivers to use if + * they wish. 
Drivers are not forced to use this code in their + * implementations but it would be useful if they code they do use at least + * provides a consistent interface and operation to userspace + */ + +#ifndef __DRM_CRTC_HELPER_H__ +#define __DRM_CRTC_HELPER_H__ + +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/idr.h> + +#include <linux/fb.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_modeset_helper.h> + +void drm_helper_disable_unused_functions(struct drm_device *dev); +int drm_crtc_helper_set_config(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx); +bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, + struct drm_framebuffer *old_fb); +bool drm_helper_crtc_in_use(struct drm_crtc *crtc); +bool drm_helper_encoder_in_use(struct drm_encoder *encoder); + +int drm_helper_connector_dpms(struct drm_connector *connector, int mode); + +void drm_helper_resume_force_mode(struct drm_device *dev); +int drm_helper_force_disable_all(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h new file mode 100644 index 000000000..effda42cc --- /dev/null +++ b/include/drm/drm_damage_helper.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Deepak Rawat <drawat@vmware.com> + * + **************************************************************************/ + +#ifndef DRM_DAMAGE_HELPER_H_ +#define DRM_DAMAGE_HELPER_H_ + +#include <drm/drm_atomic_helper.h> + +/** + * drm_atomic_for_each_plane_damage - Iterator macro for plane damage. + * @iter: The iterator to advance. + * @rect: Return a rectangle in fb coordinate clipped to plane src. + * + * Note that if the first call to iterator macro return false then no need to do + * plane update. Iterator will return full plane src when damage is not passed + * by user-space. + */ +#define drm_atomic_for_each_plane_damage(iter, rect) \ + while (drm_atomic_helper_damage_iter_next(iter, rect)) + +/** + * struct drm_atomic_helper_damage_iter - Closure structure for damage iterator. 
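A minimal sketch of how a plane's atomic_update might consume damage clips with this iterator, using the helpers declared below; foo_plane_atomic_update() and foo_flush_rect() are hypothetical:

static void foo_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state =
                drm_atomic_get_old_plane_state(state, plane);
        struct drm_plane_state *new_state =
                drm_atomic_get_new_plane_state(state, plane);
        struct drm_atomic_helper_damage_iter iter;
        struct drm_rect clip;

        drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
        drm_atomic_for_each_plane_damage(&iter, &clip) {
                /* Upload/flush only the damaged rectangle. */
                foo_flush_rect(plane, new_state, &clip);
        }
}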
+ * + * This structure tracks state needed to walk the list of plane damage clips. + */ +struct drm_atomic_helper_damage_iter { + /* private: Plane src in whole number. */ + struct drm_rect plane_src; + /* private: Rectangles in plane damage blob. */ + const struct drm_rect *clips; + /* private: Number of rectangles in plane damage blob. */ + uint32_t num_clips; + /* private: Current clip iterator is advancing on. */ + uint32_t curr_clip; + /* private: Whether need full plane update. */ + bool full_update; +}; + +void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state, + struct drm_plane_state *plane_state); +int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int flags, + unsigned int color, struct drm_clip_rect *clips, + unsigned int num_clips); +void +drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter, + const struct drm_plane_state *old_state, + const struct drm_plane_state *new_state); +bool +drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter, + struct drm_rect *rect); +bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state, + struct drm_plane_state *state, + struct drm_rect *rect); + +#endif diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h new file mode 100644 index 000000000..2188dc839 --- /dev/null +++ b/include/drm/drm_debugfs.h @@ -0,0 +1,101 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_DEBUGFS_H_ +#define _DRM_DEBUGFS_H_ + +#include <linux/types.h> +#include <linux/seq_file.h> +/** + * struct drm_info_list - debugfs info list entry + * + * This structure represents a debugfs file to be created by the drm + * core. + */ +struct drm_info_list { + /** @name: file name */ + const char *name; + /** + * @show: + * + * Show callback. &seq_file->private will be set to the &struct + * drm_info_node corresponding to the instance of this info on a given + * &struct drm_minor. 
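For illustration, a driver could register a debugfs file through this interface roughly as sketched below; foo_status_show() and foo_debugfs_list are hypothetical, and registration would typically happen from the &drm_driver.debugfs_init hook:

static int foo_status_show(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;

        seq_printf(m, "open files: %d\n", atomic_read(&dev->open_count));
        return 0;
}

static const struct drm_info_list foo_debugfs_list[] = {
        { "foo_status", foo_status_show, 0, NULL },
};

static void foo_debugfs_init(struct drm_minor *minor)
{
        drm_debugfs_create_files(foo_debugfs_list,
                                 ARRAY_SIZE(foo_debugfs_list),
                                 minor->debugfs_root, minor);
}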
+ */ + int (*show)(struct seq_file*, void*); + /** @driver_features: Required driver features for this entry */ + u32 driver_features; + /** @data: Driver-private data, should not be device-specific. */ + void *data; +}; + +/** + * struct drm_info_node - Per-minor debugfs node structure + * + * This structure represents a debugfs file, as an instantiation of a &struct + * drm_info_list on a &struct drm_minor. + * + * FIXME: + * + * No it doesn't make a hole lot of sense that we duplicate debugfs entries for + * both the render and the primary nodes, but that's how this has organically + * grown. It should probably be fixed, with a compatibility link, if needed. + */ +struct drm_info_node { + /** @minor: &struct drm_minor for this node. */ + struct drm_minor *minor; + /** @info_ent: template for this node. */ + const struct drm_info_list *info_ent; + /* private: */ + struct list_head list; + struct dentry *dent; +}; + +#if defined(CONFIG_DEBUG_FS) +void drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor); +int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor); +#else +static inline void drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor) +{} + +static inline int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor) +{ + return 0; +} +#endif + +#endif /* _DRM_DEBUGFS_H_ */ diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h new file mode 100644 index 000000000..b225eeb30 --- /dev/null +++ b/include/drm/drm_debugfs_crc.h @@ -0,0 +1,74 @@ +/* + * Copyright © 2016 Collabora Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __DRM_DEBUGFS_CRC_H__ +#define __DRM_DEBUGFS_CRC_H__ + +#define DRM_MAX_CRC_NR 10 + +/** + * struct drm_crtc_crc_entry - entry describing a frame's content + * @has_frame_counter: whether the source was able to provide a frame number + * @frame: number of the frame this CRC is about, if @has_frame_counter is true + * @crc: array of values that characterize the frame + */ +struct drm_crtc_crc_entry { + bool has_frame_counter; + uint32_t frame; + uint32_t crcs[DRM_MAX_CRC_NR]; +}; + +#define DRM_CRC_ENTRIES_NR 128 + +/** + * struct drm_crtc_crc - data supporting CRC capture on a given CRTC + * @lock: protects the fields in this struct + * @source: name of the currently configured source of CRCs + * @opened: whether userspace has opened the data file for reading + * @overflow: whether an overflow occured. + * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR + * @head: head of circular queue + * @tail: tail of circular queue + * @values_cnt: number of CRC values per entry, up to %DRM_MAX_CRC_NR + * @wq: workqueue used to synchronize reading and writing + */ +struct drm_crtc_crc { + spinlock_t lock; + const char *source; + bool opened, overflow; + struct drm_crtc_crc_entry *entries; + int head, tail; + size_t values_cnt; + wait_queue_head_t wq; +}; + +#if defined(CONFIG_DEBUG_FS) +int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, + uint32_t frame, uint32_t *crcs); +#else +static inline int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, + uint32_t frame, uint32_t *crcs) +{ + return -EINVAL; +} +#endif /* defined(CONFIG_DEBUG_FS) */ + +#endif /* __DRM_DEBUGFS_CRC_H__ */ diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h new file mode 100644 index 000000000..9923c7a68 --- /dev/null +++ b/include/drm/drm_device.h @@ -0,0 +1,365 @@ +#ifndef _DRM_DEVICE_H_ +#define _DRM_DEVICE_H_ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/idr.h> + +#include <drm/drm_legacy.h> +#include <drm/drm_mode_config.h> + +struct drm_driver; +struct drm_minor; +struct drm_master; +struct drm_vblank_crtc; +struct drm_vma_offset_manager; +struct drm_vram_mm; +struct drm_fb_helper; + +struct inode; + +struct pci_dev; +struct pci_controller; + + +/** + * enum switch_power_state - power state of drm device + */ + +enum switch_power_state { + /** @DRM_SWITCH_POWER_ON: Power state is ON */ + DRM_SWITCH_POWER_ON = 0, + + /** @DRM_SWITCH_POWER_OFF: Power state is OFF */ + DRM_SWITCH_POWER_OFF = 1, + + /** @DRM_SWITCH_POWER_CHANGING: Power state is changing */ + DRM_SWITCH_POWER_CHANGING = 2, + + /** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */ + DRM_SWITCH_POWER_DYNAMIC_OFF = 3, +}; + +/** + * struct drm_device - DRM device structure + * + * This structure represent a complete card that + * may contain multiple heads. + */ +struct drm_device { + /** @if_version: Highest interface version set */ + int if_version; + + /** @ref: Object ref-count */ + struct kref ref; + + /** @dev: Device structure of bus-device */ + struct device *dev; + + /** + * @managed: + * + * Managed resources linked to the lifetime of this &drm_device as + * tracked by @ref. 
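The recommended way to obtain a &struct drm_device is to embed it in a driver-private structure via devm_drm_dev_alloc(), which also ties the device into the managed-resource tracking described above. A sketch for a hypothetical PCI driver; the foo_* names and foo_drm_driver are placeholders:

struct foo_device {
        struct drm_device drm;
        void __iomem *mmio;
};

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct foo_device *foo;

        foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
                                 struct foo_device, drm);
        if (IS_ERR(foo))
                return PTR_ERR(foo);

        /* ... map registers, initialize modesetting ... */

        return drm_dev_register(&foo->drm, 0);
}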
+ */ + struct { + /** @managed.resources: managed resources list */ + struct list_head resources; + /** @managed.final_kfree: pointer for final kfree() call */ + void *final_kfree; + /** @managed.lock: protects @managed.resources */ + spinlock_t lock; + } managed; + + /** @driver: DRM driver managing the device */ + const struct drm_driver *driver; + + /** + * @dev_private: + * + * DRM driver private data. This is deprecated and should be left set to + * NULL. + * + * Instead of using this pointer it is recommended that drivers use + * devm_drm_dev_alloc() and embed struct &drm_device in their larger + * per-device structure. + */ + void *dev_private; + + /** @primary: Primary node */ + struct drm_minor *primary; + + /** @render: Render node */ + struct drm_minor *render; + + /** + * @registered: + * + * Internally used by drm_dev_register() and drm_connector_register(). + */ + bool registered; + + /** + * @master: + * + * Currently active master for this device. + * Protected by &master_mutex + */ + struct drm_master *master; + + /** + * @driver_features: per-device driver features + * + * Drivers can clear specific flags here to disallow + * certain features on a per-device basis while still + * sharing a single &struct drm_driver instance across + * all devices. + */ + u32 driver_features; + + /** + * @unplugged: + * + * Flag to tell if the device has been unplugged. + * See drm_dev_enter() and drm_dev_is_unplugged(). + */ + bool unplugged; + + /** @anon_inode: inode for private address-space */ + struct inode *anon_inode; + + /** @unique: Unique name of the device */ + char *unique; + + /** + * @struct_mutex: + * + * Lock for others (not &drm_minor.master and &drm_file.is_master) + * + * WARNING: + * Only drivers annotated with DRIVER_LEGACY should be using this. + */ + struct mutex struct_mutex; + + /** + * @master_mutex: + * + * Lock for &drm_minor.master and &drm_file.is_master + */ + struct mutex master_mutex; + + /** + * @open_count: + * + * Usage counter for outstanding files open, + * protected by drm_global_mutex + */ + atomic_t open_count; + + /** @filelist_mutex: Protects @filelist. */ + struct mutex filelist_mutex; + /** + * @filelist: + * + * List of userspace clients, linked through &drm_file.lhead. + */ + struct list_head filelist; + + /** + * @filelist_internal: + * + * List of open DRM files for in-kernel clients. + * Protected by &filelist_mutex. + */ + struct list_head filelist_internal; + + /** + * @clientlist_mutex: + * + * Protects &clientlist access. + */ + struct mutex clientlist_mutex; + + /** + * @clientlist: + * + * List of in-kernel clients. Protected by &clientlist_mutex. + */ + struct list_head clientlist; + + /** + * @vblank_disable_immediate: + * + * If true, vblank interrupt will be disabled immediately when the + * refcount drops to zero, as opposed to via the vblank disable + * timer. + * + * This can be set to true it the hardware has a working vblank counter + * with high-precision timestamping (otherwise there are races) and the + * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off() + * appropriately. See also @max_vblank_count and + * &drm_crtc_funcs.get_vblank_counter. + */ + bool vblank_disable_immediate; + + /** + * @vblank: + * + * Array of vblank tracking structures, one per &struct drm_crtc. For + * historical reasons (vblank support predates kernel modesetting) this + * is free-standing and not part of &struct drm_crtc itself. It must be + * initialized explicitly by calling drm_vblank_init(). 
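Since @vblank must be set up explicitly, a KMS driver usually calls drm_vblank_init() once during modeset initialization, after the mode configuration has been created. A rough sketch, reusing the hypothetical struct foo_device from above:

static int foo_modeset_init(struct foo_device *foo)
{
        struct drm_device *dev = &foo->drm;
        int ret;

        ret = drmm_mode_config_init(dev);
        if (ret)
                return ret;

        /* Allocates one vblank tracking structure per CRTC. */
        ret = drm_vblank_init(dev, 1 /* number of CRTCs this driver exposes */);
        if (ret)
                return ret;

        /* ... create planes, CRTCs, encoders and connectors ... */

        return 0;
}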
+ */ + struct drm_vblank_crtc *vblank; + + /** + * @vblank_time_lock: + * + * Protects vblank count and time updates during vblank enable/disable + */ + spinlock_t vblank_time_lock; + /** + * @vbl_lock: Top-level vblank references lock, wraps the low-level + * @vblank_time_lock. + */ + spinlock_t vbl_lock; + + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers. This value +1 will result in a + * wrap-around of the vblank register. It is used by the vblank core to + * handle wrap-arounds. + * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. That approach is suffering from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * This is the statically configured device wide maximum. The driver + * can instead choose to use a runtime configurable per-crtc value + * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count + * must be left at zero. See drm_crtc_set_max_vblank_count() on how + * to use the per-crtc value. + * + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set. + */ + u32 max_vblank_count; + + /** @vblank_event_list: List of vblank events */ + struct list_head vblank_event_list; + + /** + * @event_lock: + * + * Protects @vblank_event_list and event delivery in + * general. See drm_send_event() and drm_send_event_locked(). + */ + spinlock_t event_lock; + + /** @num_crtcs: Number of CRTCs on this device */ + unsigned int num_crtcs; + + /** @mode_config: Current mode config */ + struct drm_mode_config mode_config; + + /** @object_name_lock: GEM information */ + struct mutex object_name_lock; + + /** @object_name_idr: GEM information */ + struct idr object_name_idr; + + /** @vma_offset_manager: GEM information */ + struct drm_vma_offset_manager *vma_offset_manager; + + /** @vram_mm: VRAM MM memory manager */ + struct drm_vram_mm *vram_mm; + + /** + * @switch_power_state: + * + * Power state of the client. + * Used by drivers supporting the switcheroo driver. + * The state is maintained in the + * &vga_switcheroo_client_ops.set_gpu_state callback + */ + enum switch_power_state switch_power_state; + + /** + * @fb_helper: + * + * Pointer to the fbdev emulation structure. + * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini(). + */ + struct drm_fb_helper *fb_helper; + + /* Everything below here is for legacy driver, never use! */ + /* private: */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) + /* List of devices per driver for stealth attach cleanup */ + struct list_head legacy_dev_list; + +#ifdef __alpha__ + /** @hose: PCI hose, only used on ALPHA platforms. 
*/ + struct pci_controller *hose; +#endif + + /* AGP data */ + struct drm_agp_head *agp; + + /* Context handle management - linked list of context handles */ + struct list_head ctxlist; + + /* Context handle management - mutex for &ctxlist */ + struct mutex ctxlist_mutex; + + /* Context handle management */ + struct idr ctx_idr; + + /* Memory management - linked list of regions */ + struct list_head maplist; + + /* Memory management - user token hash table for maps */ + struct drm_open_hash map_hash; + + /* Context handle management - list of vmas (for debugging) */ + struct list_head vmalist; + + /* Optional pointer for DMA support */ + struct drm_device_dma *dma; + + /* Context swapping flag */ + __volatile__ long context_flag; + + /* Last current context */ + int last_context; + + /* Lock for &buf_use and a few other things. */ + spinlock_t buf_lock; + + /* Usage counter for buffers in use -- cannot alloc */ + int buf_use; + + /* Buffer allocation in progress */ + atomic_t buf_alloc; + + struct { + int context; + struct drm_hw_lock *lock; + } sigdata; + + struct drm_local_map *agp_buffer_map; + unsigned int agp_buffer_token; + + /* Scatter gather memory */ + struct drm_sg_mem *sg; + + /* IRQs */ + bool irq_enabled; + int irq; +#endif +}; + +#endif diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h new file mode 100644 index 000000000..49649eb84 --- /dev/null +++ b/include/drm/drm_displayid.h @@ -0,0 +1,160 @@ +/* + * Copyright © 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef DRM_DISPLAYID_H +#define DRM_DISPLAYID_H + +#include <linux/types.h> +#include <linux/bits.h> + +struct drm_edid; + +#define VESA_IEEE_OUI 0x3a0292 + +/* DisplayID Structure versions */ +#define DISPLAY_ID_STRUCTURE_VER_12 0x12 +#define DISPLAY_ID_STRUCTURE_VER_20 0x20 + +/* DisplayID Structure v1r2 Data Blocks */ +#define DATA_BLOCK_PRODUCT_ID 0x00 +#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 +#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 +#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03 +#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04 +#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05 +#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06 +#define DATA_BLOCK_VESA_TIMING 0x07 +#define DATA_BLOCK_CEA_TIMING 0x08 +#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09 +#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a +#define DATA_BLOCK_GP_ASCII_STRING 0x0b +#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c +#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d +#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e +#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f +#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 +#define DATA_BLOCK_TILED_DISPLAY 0x12 +#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f +#define DATA_BLOCK_CTA 0x81 + +/* DisplayID Structure v2r0 Data Blocks */ +#define DATA_BLOCK_2_PRODUCT_ID 0x20 +#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21 +#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22 +#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23 +#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24 +#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25 +#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26 +#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27 +#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28 +#define DATA_BLOCK_2_CONTAINER_ID 0x29 +#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e +#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81 + +/* DisplayID Structure v1r2 Product Type */ +#define PRODUCT_TYPE_EXTENSION 0 +#define PRODUCT_TYPE_TEST 1 +#define PRODUCT_TYPE_PANEL 2 +#define PRODUCT_TYPE_MONITOR 3 +#define PRODUCT_TYPE_TV 4 +#define PRODUCT_TYPE_REPEATER 5 +#define PRODUCT_TYPE_DIRECT_DRIVE 6 + +/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */ +#define PRIMARY_USE_EXTENSION 0 +#define PRIMARY_USE_TEST 1 +#define PRIMARY_USE_GENERIC 2 +#define PRIMARY_USE_TV 3 +#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4 +#define PRIMARY_USE_DESKTOP_GAMING 5 +#define PRIMARY_USE_PRESENTATION 6 +#define PRIMARY_USE_HEAD_MOUNTED_VR 7 +#define PRIMARY_USE_HEAD_MOUNTED_AR 8 + +struct displayid_header { + u8 rev; + u8 bytes; + u8 prod_id; + u8 ext_count; +} __packed; + +struct displayid_block { + u8 tag; + u8 rev; + u8 num_bytes; +} __packed; + +struct displayid_tiled_block { + struct displayid_block base; + u8 tile_cap; + u8 topo[3]; + u8 tile_size[4]; + u8 tile_pixel_bezel[5]; + u8 topology_id[8]; +} __packed; + +struct displayid_detailed_timings_1 { + u8 pixel_clock[3]; + u8 flags; + u8 hactive[2]; + u8 hblank[2]; + u8 hsync[2]; + u8 hsw[2]; + u8 vactive[2]; + u8 vblank[2]; + u8 vsync[2]; + u8 vsw[2]; +} __packed; + +struct displayid_detailed_timing_block { + struct displayid_block base; + struct displayid_detailed_timings_1 timings[]; +}; + +#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0) +#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5) + +struct displayid_vesa_vendor_specific_block { + struct displayid_block base; + u8 oui[3]; + u8 data_structure_type; + u8 mso; +} __packed; + +/* DisplayID iteration */ +struct displayid_iter { + const struct drm_edid *drm_edid; + + const u8 *section; + int length; + int idx; + int ext_index; +}; + 
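A short sketch of walking DisplayID data blocks with this iterator, using the helpers declared just below; foo_handle_tile() is a hypothetical consumer of the tiled-display block:

static void foo_parse_displayid(const struct drm_edid *drm_edid)
{
        const struct displayid_block *block;
        struct displayid_iter iter;

        displayid_iter_edid_begin(drm_edid, &iter);
        displayid_iter_for_each(block, &iter) {
                if (block->tag == DATA_BLOCK_TILED_DISPLAY)
                        foo_handle_tile((const struct displayid_tiled_block *)block);
        }
        displayid_iter_end(&iter);
}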
+void displayid_iter_edid_begin(const struct drm_edid *drm_edid, + struct displayid_iter *iter); +const struct displayid_block * +__displayid_iter_next(struct displayid_iter *iter); +#define displayid_iter_for_each(__block, __iter) \ + while (((__block) = __displayid_iter_next(__iter))) +void displayid_iter_end(struct displayid_iter *iter); + +#endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h new file mode 100644 index 000000000..f6159acb8 --- /dev/null +++ b/include/drm/drm_drv.h @@ -0,0 +1,607 @@ +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * Copyright 2016 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_DRV_H_ +#define _DRM_DRV_H_ + +#include <linux/list.h> +#include <linux/irqreturn.h> + +#include <drm/drm_device.h> + +struct drm_file; +struct drm_gem_object; +struct drm_master; +struct drm_minor; +struct dma_buf; +struct dma_buf_attachment; +struct drm_display_mode; +struct drm_mode_create_dumb; +struct drm_printer; +struct sg_table; + +/** + * enum drm_driver_feature - feature flags + * + * See &drm_driver.driver_features, drm_device.driver_features and + * drm_core_check_feature(). + */ +enum drm_driver_feature { + /** + * @DRIVER_GEM: + * + * Driver use the GEM memory manager. This should be set for all modern + * drivers. + */ + DRIVER_GEM = BIT(0), + /** + * @DRIVER_MODESET: + * + * Driver supports mode setting interfaces (KMS). + */ + DRIVER_MODESET = BIT(1), + /** + * @DRIVER_RENDER: + * + * Driver supports dedicated render nodes. See also the :ref:`section on + * render nodes <drm_render_node>` for details. + */ + DRIVER_RENDER = BIT(3), + /** + * @DRIVER_ATOMIC: + * + * Driver supports the full atomic modesetting userspace API. Drivers + * which only use atomic internally, but do not support the full + * userspace API (e.g. not all properties converted to atomic, or + * multi-plane updates are not guaranteed to be tear-free) should not + * set this flag. + */ + DRIVER_ATOMIC = BIT(4), + /** + * @DRIVER_SYNCOBJ: + * + * Driver supports &drm_syncobj for explicit synchronization of command + * submission. + */ + DRIVER_SYNCOBJ = BIT(5), + /** + * @DRIVER_SYNCOBJ_TIMELINE: + * + * Driver supports the timeline flavor of &drm_syncobj for explicit + * synchronization of command submission. 
+ */ + DRIVER_SYNCOBJ_TIMELINE = BIT(6), + + /* IMPORTANT: Below are all the legacy flags, add new ones above. */ + + /** + * @DRIVER_USE_AGP: + * + * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage + * AGP resources. New drivers don't need this. + */ + DRIVER_USE_AGP = BIT(25), + /** + * @DRIVER_LEGACY: + * + * Denote a legacy driver using shadow attach. Do not use. + */ + DRIVER_LEGACY = BIT(26), + /** + * @DRIVER_PCI_DMA: + * + * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace + * will be enabled. Only for legacy drivers. Do not use. + */ + DRIVER_PCI_DMA = BIT(27), + /** + * @DRIVER_SG: + * + * Driver can perform scatter/gather DMA, allocation and mapping of + * scatter/gather buffers will be enabled. Only for legacy drivers. Do + * not use. + */ + DRIVER_SG = BIT(28), + + /** + * @DRIVER_HAVE_DMA: + * + * Driver supports DMA, the userspace DMA API will be supported. Only + * for legacy drivers. Do not use. + */ + DRIVER_HAVE_DMA = BIT(29), + /** + * @DRIVER_HAVE_IRQ: + * + * Legacy irq support. Only for legacy drivers. Do not use. + */ + DRIVER_HAVE_IRQ = BIT(30), + /** + * @DRIVER_KMS_LEGACY_CONTEXT: + * + * Used only by nouveau for backwards compatibility with existing + * userspace. Do not use. + */ + DRIVER_KMS_LEGACY_CONTEXT = BIT(31), +}; + +/** + * struct drm_driver - DRM driver structure + * + * This structure represent the common code for a family of cards. There will be + * one &struct drm_device for each card present in this family. It contains lots + * of vfunc entries, and a pile of those probably should be moved to more + * appropriate places like &drm_mode_config_funcs or into a new operations + * structure for GEM drivers. + */ +struct drm_driver { + /** + * @load: + * + * Backward-compatible driver callback to complete initialization steps + * after the driver is registered. For this reason, may suffer from + * race conditions and its use is deprecated for new drivers. It is + * therefore only supported for existing drivers not yet converted to + * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for + * proper and race-free way to set up a &struct drm_device. + * + * This is deprecated, do not use! + * + * Returns: + * + * Zero on success, non-zero value on failure. + */ + int (*load) (struct drm_device *, unsigned long flags); + + /** + * @open: + * + * Driver callback when a new &struct drm_file is opened. Useful for + * setting up driver-private data structures like buffer allocators, + * execution contexts or similar things. Such driver-private resources + * must be released again in @postclose. + * + * Since the display/modeset side of DRM can only be owned by exactly + * one &struct drm_file (see &drm_file.is_master and &drm_device.master) + * there should never be a need to set up any modeset related resources + * in this callback. Doing so would be a driver design bug. + * + * Returns: + * + * 0 on success, a negative error code on failure, which will be + * promoted to userspace as the result of the open() system call. + */ + int (*open) (struct drm_device *, struct drm_file *); + + /** + * @postclose: + * + * One of the driver callbacks when a new &struct drm_file is closed. + * Useful for tearing down driver-private data structures allocated in + * @open like buffer allocators, execution contexts or similar things. 
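As an illustration of the pairing between @open and @postclose, a driver might manage per-file private data as sketched below; struct foo_file_priv is hypothetical, and &drm_file.driver_priv is the conventional place to store it:

struct foo_file_priv {
        /* hypothetical per-client state, e.g. an execution context */
        struct foo_context *ctx;
};

static int foo_open(struct drm_device *dev, struct drm_file *file)
{
        struct foo_file_priv *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        file->driver_priv = fpriv;
        return 0;
}

static void foo_postclose(struct drm_device *dev, struct drm_file *file)
{
        kfree(file->driver_priv);
}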
+ * + * Since the display/modeset side of DRM can only be owned by exactly + * one &struct drm_file (see &drm_file.is_master and &drm_device.master) + * there should never be a need to tear down any modeset related + * resources in this callback. Doing so would be a driver design bug. + */ + void (*postclose) (struct drm_device *, struct drm_file *); + + /** + * @lastclose: + * + * Called when the last &struct drm_file has been closed and there's + * currently no userspace client for the &struct drm_device. + * + * Modern drivers should only use this to force-restore the fbdev + * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked(). + * Anything else would indicate there's something seriously wrong. + * Modern drivers can also use this to execute delayed power switching + * state changes, e.g. in conjunction with the :ref:`vga_switcheroo` + * infrastructure. + * + * This is called after @postclose hook has been called. + * + * NOTE: + * + * All legacy drivers use this callback to de-initialize the hardware. + * This is purely because of the shadow-attach model, where the DRM + * kernel driver does not really own the hardware. Instead ownershipe is + * handled with the help of userspace through an inheritedly racy dance + * to set/unset the VT into raw mode. + * + * Legacy drivers initialize the hardware in the @firstopen callback, + * which isn't even called for modern drivers. + */ + void (*lastclose) (struct drm_device *); + + /** + * @unload: + * + * Reverse the effects of the driver load callback. Ideally, + * the clean up performed by the driver should happen in the + * reverse order of the initialization. Similarly to the load + * hook, this handler is deprecated and its usage should be + * dropped in favor of an open-coded teardown function at the + * driver layer. See drm_dev_unregister() and drm_dev_put() + * for the proper way to remove a &struct drm_device. + * + * The unload() hook is called right after unregistering + * the device. + * + */ + void (*unload) (struct drm_device *); + + /** + * @release: + * + * Optional callback for destroying device data after the final + * reference is released, i.e. the device is being destroyed. + * + * This is deprecated, clean up all memory allocations associated with a + * &drm_device using drmm_add_action(), drmm_kmalloc() and related + * managed resources functions. + */ + void (*release) (struct drm_device *); + + /** + * @master_set: + * + * Called whenever the minor master is set. Only used by vmwgfx. + */ + void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, + bool from_open); + /** + * @master_drop: + * + * Called whenever the minor master is dropped. Only used by vmwgfx. + */ + void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); + + /** + * @debugfs_init: + * + * Allows drivers to create driver-specific debugfs files. + */ + void (*debugfs_init)(struct drm_minor *minor); + + /** + * @gem_create_object: constructor for gem objects + * + * Hook for allocating the GEM object struct, for use by the CMA + * and SHMEM GEM helpers. Returns a GEM object on success, or an + * ERR_PTR()-encoded error code otherwise. + */ + struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, + size_t size); + + /** + * @prime_handle_to_fd: + * + * Main PRIME export function. Should be implemented with + * drm_gem_prime_handle_to_fd() for GEM based drivers. + * + * For an in-depth discussion see :ref:`PRIME buffer sharing + * documentation <prime_buffer_sharing>`. 
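+ *
+ * GEM based drivers normally just point this and @prime_fd_to_handle at
+ * the core helpers; a minimal sketch (hypothetical driver, shown without
+ * its other hooks, not part of this header):
+ *
+ * .. code-block:: c
+ *
+ *	static const struct drm_driver foo_drm_driver = {
+ *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+ *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+ *		.gem_prime_mmap		= drm_gem_prime_mmap,
+ *	};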
+ */ + int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, + uint32_t handle, uint32_t flags, int *prime_fd); + /** + * @prime_fd_to_handle: + * + * Main PRIME import function. Should be implemented with + * drm_gem_prime_fd_to_handle() for GEM based drivers. + * + * For an in-depth discussion see :ref:`PRIME buffer sharing + * documentation <prime_buffer_sharing>`. + */ + int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, + int prime_fd, uint32_t *handle); + + /** + * @gem_prime_import: + * + * Import hook for GEM drivers. + * + * This defaults to drm_gem_prime_import() if not set. + */ + struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, + struct dma_buf *dma_buf); + /** + * @gem_prime_import_sg_table: + * + * Optional hook used by the PRIME helper functions + * drm_gem_prime_import() respectively drm_gem_prime_import_dev(). + */ + struct drm_gem_object *(*gem_prime_import_sg_table)( + struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + /** + * @gem_prime_mmap: + * + * mmap hook for GEM drivers, used to implement dma-buf mmap in the + * PRIME helpers. + * + * This hook only exists for historical reasons. Drivers must use + * drm_gem_prime_mmap() to implement it. + * + * FIXME: Convert all drivers to implement mmap in struct + * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into + * its callers. This hook should be removed afterwards. + */ + int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); + + /** + * @dumb_create: + * + * This creates a new dumb buffer in the driver's backing storage manager (GEM, + * TTM or something else entirely) and returns the resulting buffer handle. This + * handle can then be wrapped up into a framebuffer modeset object. + * + * Note that userspace is not allowed to use such objects for render + * acceleration - drivers must create their own private ioctls for such a use + * case. + * + * Width, height and depth are specified in the &drm_mode_create_dumb + * argument. The callback needs to fill the handle, pitch and size for + * the created buffer. + * + * Called by the user via ioctl. + * + * Returns: + * + * Zero on success, negative errno on failure. + */ + int (*dumb_create)(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + /** + * @dumb_map_offset: + * + * Allocate an offset in the drm device node's address space to be able to + * memory map a dumb buffer. + * + * The default implementation is drm_gem_create_mmap_offset(). GEM based + * drivers must not overwrite this. + * + * Called by the user via ioctl. + * + * Returns: + * + * Zero on success, negative errno on failure. + */ + int (*dumb_map_offset)(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); + /** + * @dumb_destroy: + * + * This destroys the userspace handle for the given dumb backing storage buffer. + * Since buffer objects must be reference counted in the kernel a buffer object + * won't be immediately freed if a framebuffer modeset object still uses it. + * + * Called by the user via ioctl. + * + * The default implementation is drm_gem_dumb_destroy(). GEM based drivers + * must not overwrite this. + * + * Returns: + * + * Zero on success, negative errno on failure. 
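+ *
+ * In practice a GEM driver only provides @dumb_create and leaves
+ * @dumb_map_offset and @dumb_destroy to the defaults; a minimal sketch,
+ * assuming the GEM DMA helpers are used (hypothetical driver, not part
+ * of this header):
+ *
+ * .. code-block:: c
+ *
+ *	static const struct drm_driver foo_drm_driver = {
+ *		.dumb_create = drm_gem_dma_dumb_create,
+ *	};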
+ */ + int (*dumb_destroy)(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle); + + /** @major: driver major number */ + int major; + /** @minor: driver minor number */ + int minor; + /** @patchlevel: driver patch level */ + int patchlevel; + /** @name: driver name */ + char *name; + /** @desc: driver description */ + char *desc; + /** @date: driver date */ + char *date; + + /** + * @driver_features: + * Driver features, see &enum drm_driver_feature. Drivers can disable + * some features on a per-instance basis using + * &drm_device.driver_features. + */ + u32 driver_features; + + /** + * @ioctls: + * + * Array of driver-private IOCTL description entries. See the chapter on + * :ref:`IOCTL support in the userland interfaces + * chapter<drm_driver_ioctl>` for the full details. + */ + + const struct drm_ioctl_desc *ioctls; + /** @num_ioctls: Number of entries in @ioctls. */ + int num_ioctls; + + /** + * @fops: + * + * File operations for the DRM device node. See the discussion in + * :ref:`file operations<drm_driver_fops>` for in-depth coverage and + * some examples. + */ + const struct file_operations *fops; + +#ifdef CONFIG_DRM_LEGACY + /* Everything below here is for legacy driver, never use! */ + /* private: */ + + int (*firstopen) (struct drm_device *); + void (*preclose) (struct drm_device *, struct drm_file *file_priv); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); + int (*dma_quiescent) (struct drm_device *); + int (*context_dtor) (struct drm_device *dev, int context); + irqreturn_t (*irq_handler)(int irq, void *arg); + void (*irq_preinstall)(struct drm_device *dev); + int (*irq_postinstall)(struct drm_device *dev); + void (*irq_uninstall)(struct drm_device *dev); + u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); + int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); + void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); + int dev_priv_size; +#endif +}; + +void *__devm_drm_dev_alloc(struct device *parent, + const struct drm_driver *driver, + size_t size, size_t offset); + +/** + * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance + * @parent: Parent device object + * @driver: DRM driver + * @type: the type of the struct which contains struct &drm_device + * @member: the name of the &drm_device within @type. + * + * This allocates and initialize a new DRM device. No device registration is done. + * Call drm_dev_register() to advertice the device to user space and register it + * with other core subsystems. This should be done last in the device + * initialization sequence to make sure userspace can't access an inconsistent + * state. + * + * The initial ref-count of the object is 1. Use drm_dev_get() and + * drm_dev_put() to take and drop further ref-counts. + * + * It is recommended that drivers embed &struct drm_device into their own device + * structure. + * + * Note that this manages the lifetime of the resulting &drm_device + * automatically using devres. The DRM device initialized with this function is + * automatically put on driver detach using drm_dev_put(). + * + * RETURNS: + * Pointer to new DRM device, or ERR_PTR on failure. 
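+ *
+ * A minimal sketch of the intended usage, assuming a hypothetical foo
+ * platform driver and a foo_drm_driver declared elsewhere (not part of
+ * this header):
+ *
+ * .. code-block:: c
+ *
+ *	struct foo_device {
+ *		struct drm_device drm;
+ *		void __iomem *regs;
+ *	};
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct foo_device *foo;
+ *
+ *		foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
+ *					 struct foo_device, drm);
+ *		if (IS_ERR(foo))
+ *			return PTR_ERR(foo);
+ *
+ *		return drm_dev_register(&foo->drm, 0);
+ *	}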
+ */
+#define devm_drm_dev_alloc(parent, driver, type, member) \
+	((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
+				       offsetof(type, member)))
+
+struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
+				 struct device *parent);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
+
+void drm_dev_get(struct drm_device *dev);
+void drm_dev_put(struct drm_device *dev);
+void drm_put_dev(struct drm_device *dev);
+bool drm_dev_enter(struct drm_device *dev, int *idx);
+void drm_dev_exit(int idx);
+void drm_dev_unplug(struct drm_device *dev);
+
+/**
+ * drm_dev_is_unplugged - is a DRM device unplugged
+ * @dev: DRM device
+ *
+ * This function can be called to check whether a hotpluggable device is
+ * unplugged. Unplugging itself is signalled through drm_dev_unplug(). If a
+ * device is unplugged, these two functions guarantee that any store before
+ * calling drm_dev_unplug() is visible to callers of this function after it
+ * completes.
+ *
+ * WARNING: This function fundamentally races against drm_dev_unplug(). It is
+ * recommended that drivers instead use the underlying drm_dev_enter() and
+ * drm_dev_exit() function pairs.
+ */
+static inline bool drm_dev_is_unplugged(struct drm_device *dev)
+{
+	int idx;
+
+	if (drm_dev_enter(dev, &idx)) {
+		drm_dev_exit(idx);
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * drm_core_check_all_features - check driver feature flags mask
+ * @dev: DRM device to check
+ * @features: feature flag(s) mask
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features,
+ * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
+ *
+ * Returns true if all features in the @features mask are supported, false
+ * otherwise.
+ */
+static inline bool drm_core_check_all_features(const struct drm_device *dev,
+					       u32 features)
+{
+	u32 supported = dev->driver->driver_features & dev->driver_features;
+
+	return features && (supported & features) == features;
+}
+
+/**
+ * drm_core_check_feature - check driver feature flags
+ * @dev: DRM device to check
+ * @feature: feature flag
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features,
+ * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
+ *
+ * Returns true if the @feature is supported, false otherwise.
+ */
+static inline bool drm_core_check_feature(const struct drm_device *dev,
+					  enum drm_driver_feature feature)
+{
+	return drm_core_check_all_features(dev, feature);
+}
+
+/**
+ * drm_drv_uses_atomic_modeset - check if the driver implements
+ * atomic_commit()
+ * @dev: DRM device
+ *
+ * This check is useful if drivers do not have DRIVER_ATOMIC set but
+ * have atomic modesetting internally implemented.
+ */ +static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) +{ + return drm_core_check_feature(dev, DRIVER_ATOMIC) || + (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); +} + + +int drm_dev_set_unique(struct drm_device *dev, const char *name); + +extern bool drm_firmware_drivers_only(void); + +#endif diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h new file mode 100644 index 000000000..1ed61e2b3 --- /dev/null +++ b/include/drm/drm_edid.h @@ -0,0 +1,617 @@ +/* + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __DRM_EDID_H__ +#define __DRM_EDID_H__ + +#include <linux/types.h> +#include <linux/hdmi.h> +#include <drm/drm_mode.h> + +struct drm_device; +struct drm_edid; +struct i2c_adapter; + +#define EDID_LENGTH 128 +#define DDC_ADDR 0x50 +#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */ + +#define CEA_EXT 0x02 +#define VTB_EXT 0x10 +#define DI_EXT 0x40 +#define LS_EXT 0x50 +#define MI_EXT 0x60 +#define DISPLAYID_EXT 0x70 + +struct est_timings { + u8 t1; + u8 t2; + u8 mfg_rsvd; +} __attribute__((packed)); + +/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */ +#define EDID_TIMING_ASPECT_SHIFT 6 +#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT) + +/* need to add 60 */ +#define EDID_TIMING_VFREQ_SHIFT 0 +#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT) + +struct std_timing { + u8 hsize; /* need to multiply by 8 then add 248 */ + u8 vfreq_aspect; +} __attribute__((packed)); + +#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) +#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3) +#define DRM_EDID_PT_STEREO (1 << 5) +#define DRM_EDID_PT_INTERLACED (1 << 7) + +/* If detailed data is pixel timing */ +struct detailed_pixel_timing { + u8 hactive_lo; + u8 hblank_lo; + u8 hactive_hblank_hi; + u8 vactive_lo; + u8 vblank_lo; + u8 vactive_vblank_hi; + u8 hsync_offset_lo; + u8 hsync_pulse_width_lo; + u8 vsync_offset_pulse_width_lo; + u8 hsync_vsync_offset_pulse_width_hi; + u8 width_mm_lo; + u8 height_mm_lo; + u8 width_height_mm_hi; + u8 hborder; + u8 vborder; + u8 misc; +} __attribute__((packed)); + +/* If it's not pixel timing, it'll be one of the below */ +struct detailed_data_string { + u8 str[13]; +} __attribute__((packed)); + +#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0) /* 1.4 */ +#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1) /* 1.4 */ +#define 
DRM_EDID_RANGE_OFFSET_MIN_HFREQ (1 << 2) /* 1.4 */ +#define DRM_EDID_RANGE_OFFSET_MAX_HFREQ (1 << 3) /* 1.4 */ + +#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00 +#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01 +#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02 +#define DRM_EDID_CVT_SUPPORT_FLAG 0x04 + +struct detailed_data_monitor_range { + u8 min_vfreq; + u8 max_vfreq; + u8 min_hfreq_khz; + u8 max_hfreq_khz; + u8 pixel_clock_mhz; /* need to multiply by 10 */ + u8 flags; + union { + struct { + u8 reserved; + u8 hfreq_start_khz; /* need to multiply by 2 */ + u8 c; /* need to divide by 2 */ + __le16 m; + u8 k; + u8 j; /* need to divide by 2 */ + } __attribute__((packed)) gtf2; + struct { + u8 version; + u8 data1; /* high 6 bits: extra clock resolution */ + u8 data2; /* plus low 2 of above: max hactive */ + u8 supported_aspects; + u8 flags; /* preferred aspect and blanking support */ + u8 supported_scalings; + u8 preferred_refresh; + } __attribute__((packed)) cvt; + } __attribute__((packed)) formula; +} __attribute__((packed)); + +struct detailed_data_wpindex { + u8 white_yx_lo; /* Lower 2 bits each */ + u8 white_x_hi; + u8 white_y_hi; + u8 gamma; /* need to divide by 100 then add 1 */ +} __attribute__((packed)); + +struct detailed_data_color_point { + u8 windex1; + u8 wpindex1[3]; + u8 windex2; + u8 wpindex2[3]; +} __attribute__((packed)); + +struct cvt_timing { + u8 code[3]; +} __attribute__((packed)); + +struct detailed_non_pixel { + u8 pad1; + u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name + fb=color point data, fa=standard timing data, + f9=undefined, f8=mfg. reserved */ + u8 pad2; + union { + struct detailed_data_string str; + struct detailed_data_monitor_range range; + struct detailed_data_wpindex color; + struct std_timing timings[6]; + struct cvt_timing cvt[4]; + } __attribute__((packed)) data; +} __attribute__((packed)); + +#define EDID_DETAIL_EST_TIMINGS 0xf7 +#define EDID_DETAIL_CVT_3BYTE 0xf8 +#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9 +#define EDID_DETAIL_STD_MODES 0xfa +#define EDID_DETAIL_MONITOR_CPDATA 0xfb +#define EDID_DETAIL_MONITOR_NAME 0xfc +#define EDID_DETAIL_MONITOR_RANGE 0xfd +#define EDID_DETAIL_MONITOR_STRING 0xfe +#define EDID_DETAIL_MONITOR_SERIAL 0xff + +struct detailed_timing { + __le16 pixel_clock; /* need to multiply by 10 KHz */ + union { + struct detailed_pixel_timing pixel_data; + struct detailed_non_pixel other_data; + } __attribute__((packed)) data; +} __attribute__((packed)); + +#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0) +#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1) +#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2) +#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3) +#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) +#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) +#define DRM_EDID_INPUT_DIGITAL (1 << 7) +#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_MASK (7 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_UNDEF (0 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_DVI (1 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3 
<< 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_MDDI (4 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */ + +#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) +#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) +#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2) +/* If analog */ +#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */ +/* If digital */ +#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3) +#define DRM_EDID_FEATURE_RGB (0 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */ + +#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5) +#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) +#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) + +#define DRM_EDID_HDMI_DC_48 (1 << 6) +#define DRM_EDID_HDMI_DC_36 (1 << 5) +#define DRM_EDID_HDMI_DC_30 (1 << 4) +#define DRM_EDID_HDMI_DC_Y444 (1 << 3) + +/* YCBCR 420 deep color modes */ +#define DRM_EDID_YCBCR420_DC_48 (1 << 2) +#define DRM_EDID_YCBCR420_DC_36 (1 << 1) +#define DRM_EDID_YCBCR420_DC_30 (1 << 0) +#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ + DRM_EDID_YCBCR420_DC_36 | \ + DRM_EDID_YCBCR420_DC_30) + +/* HDMI 2.1 additional fields */ +#define DRM_EDID_MAX_FRL_RATE_MASK 0xf0 +#define DRM_EDID_FAPA_START_LOCATION (1 << 0) +#define DRM_EDID_ALLM (1 << 1) +#define DRM_EDID_FVA (1 << 2) + +/* Deep Color specific */ +#define DRM_EDID_DC_30BIT_420 (1 << 0) +#define DRM_EDID_DC_36BIT_420 (1 << 1) +#define DRM_EDID_DC_48BIT_420 (1 << 2) + +/* VRR specific */ +#define DRM_EDID_CNMVRR (1 << 3) +#define DRM_EDID_CINEMA_VRR (1 << 4) +#define DRM_EDID_MDELTA (1 << 5) +#define DRM_EDID_VRR_MAX_UPPER_MASK 0xc0 +#define DRM_EDID_VRR_MAX_LOWER_MASK 0xff +#define DRM_EDID_VRR_MIN_MASK 0x3f + +/* DSC specific */ +#define DRM_EDID_DSC_10BPC (1 << 0) +#define DRM_EDID_DSC_12BPC (1 << 1) +#define DRM_EDID_DSC_16BPC (1 << 2) +#define DRM_EDID_DSC_ALL_BPP (1 << 3) +#define DRM_EDID_DSC_NATIVE_420 (1 << 6) +#define DRM_EDID_DSC_1P2 (1 << 7) +#define DRM_EDID_DSC_MAX_FRL_RATE_MASK 0xf0 +#define DRM_EDID_DSC_MAX_SLICES 0xf +#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f + +/* ELD Header Block */ +#define DRM_ELD_HEADER_BLOCK_SIZE 4 + +#define DRM_ELD_VER 0 +# define DRM_ELD_VER_SHIFT 3 +# define DRM_ELD_VER_MASK (0x1f << 3) +# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ +# define DRM_ELD_VER_CANNED (0x1f << 3) + +#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! 
*/ + +/* ELD Baseline Block for ELD_Ver == 2 */ +#define DRM_ELD_CEA_EDID_VER_MNL 4 +# define DRM_ELD_CEA_EDID_VER_SHIFT 5 +# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) +# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) +# define DRM_ELD_MNL_SHIFT 0 +# define DRM_ELD_MNL_MASK (0x1f << 0) + +#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 +# define DRM_ELD_SAD_COUNT_SHIFT 4 +# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) +# define DRM_ELD_CONN_TYPE_SHIFT 2 +# define DRM_ELD_CONN_TYPE_MASK (3 << 2) +# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) +# define DRM_ELD_CONN_TYPE_DP (1 << 2) +# define DRM_ELD_SUPPORTS_AI (1 << 1) +# define DRM_ELD_SUPPORTS_HDCP (1 << 0) + +#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ +# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ + +#define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f +# define DRM_ELD_SPEAKER_RLRC (1 << 6) +# define DRM_ELD_SPEAKER_FLRC (1 << 5) +# define DRM_ELD_SPEAKER_RC (1 << 4) +# define DRM_ELD_SPEAKER_RLR (1 << 3) +# define DRM_ELD_SPEAKER_FC (1 << 2) +# define DRM_ELD_SPEAKER_LFE (1 << 1) +# define DRM_ELD_SPEAKER_FLR (1 << 0) + +#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ +# define DRM_ELD_PORT_ID_LEN 8 + +#define DRM_ELD_MANUFACTURER_NAME0 16 +#define DRM_ELD_MANUFACTURER_NAME1 17 + +#define DRM_ELD_PRODUCT_CODE0 18 +#define DRM_ELD_PRODUCT_CODE1 19 + +#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ + +#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) + +struct edid { + u8 header[8]; + /* Vendor & product info */ + u8 mfg_id[2]; + u8 prod_code[2]; + u32 serial; /* FIXME: byte order */ + u8 mfg_week; + u8 mfg_year; + /* EDID version */ + u8 version; + u8 revision; + /* Display info: */ + u8 input; + u8 width_cm; + u8 height_cm; + u8 gamma; + u8 features; + /* Color characteristics */ + u8 red_green_lo; + u8 blue_white_lo; + u8 red_x; + u8 red_y; + u8 green_x; + u8 green_y; + u8 blue_x; + u8 blue_y; + u8 white_x; + u8 white_y; + /* Est. timings and mfg rsvd timings*/ + struct est_timings established_timings; + /* Standard timings 1-8*/ + struct std_timing standard_timings[8]; + /* Detailing timings 1-4 */ + struct detailed_timing detailed_timings[4]; + /* Number of 128 byte ext. 
blocks */ + u8 extensions; + /* Checksum */ + u8 checksum; +} __attribute__((packed)); + +#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) + +/* Short Audio Descriptor */ +struct cea_sad { + u8 format; + u8 channels; /* max number of channels - 1 */ + u8 freq; + u8 byte2; /* meaning depends on format */ +}; + +struct drm_encoder; +struct drm_connector; +struct drm_connector_state; +struct drm_display_mode; + +int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads); +int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb); +int drm_av_sync_delay(struct drm_connector *connector, + const struct drm_display_mode *mode); + +#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE +struct edid *drm_load_edid_firmware(struct drm_connector *connector); +int __drm_set_edid_firmware_path(const char *path); +int __drm_get_edid_firmware_path(char *buf, size_t bufsize); +#else +static inline struct edid * +drm_load_edid_firmware(struct drm_connector *connector) +{ + return ERR_PTR(-ENOENT); +} +#endif + +bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2); + +int +drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, + const struct drm_connector *connector, + const struct drm_display_mode *mode); +int +drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, + const struct drm_connector *connector, + const struct drm_display_mode *mode); + +void +drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, + const struct drm_connector *connector, + const struct drm_display_mode *mode, + enum hdmi_quantization_range rgb_quant_range); + +/** + * drm_eld_mnl - Get ELD monitor name length in bytes. + * @eld: pointer to an eld memory structure with mnl set + */ +static inline int drm_eld_mnl(const uint8_t *eld) +{ + return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; +} + +/** + * drm_eld_sad - Get ELD SAD structures. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline const uint8_t *drm_eld_sad(const uint8_t *eld) +{ + unsigned int ver, mnl; + + ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; + if (ver != 2 && ver != 31) + return NULL; + + mnl = drm_eld_mnl(eld); + if (mnl > 16) + return NULL; + + return eld + DRM_ELD_CEA_SAD(mnl, 0); +} + +/** + * drm_eld_sad_count - Get ELD SAD count. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline int drm_eld_sad_count(const uint8_t *eld) +{ + return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> + DRM_ELD_SAD_COUNT_SHIFT; +} + +/** + * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes + * @eld: pointer to an eld memory structure with mnl and sad_count set + * + * This is a helper for determining the payload size of the baseline block, in + * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. + */ +static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) +{ + return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; +} + +/** + * drm_eld_size - Get ELD size in bytes + * @eld: pointer to a complete eld memory structure + * + * The returned value does not include the vendor block. It's vendor specific, + * and comprises of the remaining bytes in the ELD memory buffer after + * drm_eld_size() bytes of header and baseline block. + * + * The returned value is guaranteed to be a multiple of 4. 
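+ *
+ * The drm_eld_sad() and drm_eld_sad_count() helpers above are typically
+ * used together to walk the Short Audio Descriptors; a minimal sketch
+ * (decode_one_sad() is a hypothetical consumer, not part of this header):
+ *
+ * .. code-block:: c
+ *
+ *	const u8 *sad = drm_eld_sad(eld);
+ *	int i;
+ *
+ *	if (sad)
+ *		for (i = 0; i < drm_eld_sad_count(eld); i++, sad += 3)
+ *			decode_one_sad(sad);
+ *
+ * where each Short Audio Descriptor is 3 bytes long.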
+ */ +static inline int drm_eld_size(const uint8_t *eld) +{ + return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; +} + +/** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. + */ +static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** + * drm_eld_get_conn_type - Get device type hdmi/dp connected + * @eld: pointer to an ELD memory structure + * + * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to + * identify the display type connected. + */ +static inline u8 drm_eld_get_conn_type(const uint8_t *eld) +{ + return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; +} + +/** + * drm_edid_decode_mfg_id - Decode the manufacturer ID + * @mfg_id: The manufacturer ID + * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0' + * termination + */ +static inline const char *drm_edid_decode_mfg_id(u16 mfg_id, char vend[4]) +{ + vend[0] = '@' + ((mfg_id >> 10) & 0x1f); + vend[1] = '@' + ((mfg_id >> 5) & 0x1f); + vend[2] = '@' + ((mfg_id >> 0) & 0x1f); + vend[3] = '\0'; + + return vend; +} + +/** + * drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id() + * @vend_chr_0: First character of the vendor string. + * @vend_chr_1: Second character of the vendor string. + * @vend_chr_2: Third character of the vendor string. + * @product_id: The 16-bit product ID. + * + * This is a macro so that it can be calculated at compile time and used + * as an initializer. + * + * For instance: + * drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08 + * + * Return: a 32-bit ID per panel. + */ +#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \ + ((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \ + (((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \ + (((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \ + ((product_id) & 0xffff)) + +/** + * drm_edid_decode_panel_id - Decode a panel ID from drm_edid_encode_panel_id() + * @panel_id: The panel ID to decode. + * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0' + * termination + * @product_id: The product ID will be returned here. 
+ * + * For instance, after: + * drm_edid_decode_panel_id(0x09e52d08, vend, &product_id) + * These will be true: + * vend[0] = 'B' + * vend[1] = 'O' + * vend[2] = 'E' + * vend[3] = '\0' + * product_id = 0x2d08 + */ +static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id) +{ + *product_id = (u16)(panel_id & 0xffff); + drm_edid_decode_mfg_id(panel_id >> 16, vend); +} + +bool drm_probe_ddc(struct i2c_adapter *adapter); +struct edid *drm_do_get_edid(struct drm_connector *connector, + int (*get_edid_block)(void *data, u8 *buf, unsigned int block, + size_t len), + void *data); +struct edid *drm_get_edid(struct drm_connector *connector, + struct i2c_adapter *adapter); +u32 drm_edid_get_panel_id(struct i2c_adapter *adapter); +struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter); +struct edid *drm_edid_duplicate(const struct edid *edid); +int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); +int drm_add_override_edid_modes(struct drm_connector *connector); + +u8 drm_match_cea_mode(const struct drm_display_mode *to_match); +bool drm_detect_hdmi_monitor(const struct edid *edid); +bool drm_detect_monitor_audio(const struct edid *edid); +enum hdmi_quantization_range +drm_default_rgb_quant_range(const struct drm_display_mode *mode); +int drm_add_modes_noedid(struct drm_connector *connector, + int hdisplay, int vdisplay); +void drm_set_preferred_mode(struct drm_connector *connector, + int hpref, int vpref); + +int drm_edid_header_is_valid(const void *edid); +bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid, + bool *edid_corrupt); +bool drm_edid_is_valid(struct edid *edid); +void drm_edid_get_monitor_name(const struct edid *edid, char *name, + int buflen); +struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, + int hsize, int vsize, int fresh, + bool rb); +struct drm_display_mode * +drm_display_mode_from_cea_vic(struct drm_device *dev, + u8 video_code); + +/* Interface based on struct drm_edid */ +const struct drm_edid *drm_edid_alloc(const void *edid, size_t size); +const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid); +void drm_edid_free(const struct drm_edid *drm_edid); +const struct edid *drm_edid_raw(const struct drm_edid *drm_edid); +const struct drm_edid *drm_edid_read(struct drm_connector *connector); +const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector, + struct i2c_adapter *adapter); +const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector, + int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len), + void *context); +int drm_edid_connector_update(struct drm_connector *connector, + const struct drm_edid *edid); +const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid, + int ext_id, int *ext_index); + +#endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h new file mode 100644 index 000000000..3a09682af --- /dev/null +++ b/include/drm/drm_encoder.h @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution 
of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_ENCODER_H__ +#define __DRM_ENCODER_H__ + +#include <linux/list.h> +#include <linux/ctype.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mode.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_util.h> + +struct drm_encoder; + +/** + * struct drm_encoder_funcs - encoder controls + * + * Encoders sit between CRTCs and connectors. + */ +struct drm_encoder_funcs { + /** + * @reset: + * + * Reset encoder hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + */ + void (*reset)(struct drm_encoder *encoder); + + /** + * @destroy: + * + * Clean up encoder resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since an encoder cannot be + * hotplugged in DRM. + */ + void (*destroy)(struct drm_encoder *encoder); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the encoder like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_encoder *encoder); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the encoder from + * @late_register. It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + */ + void (*early_unregister)(struct drm_encoder *encoder); +}; + +/** + * struct drm_encoder - central DRM encoder structure + * @dev: parent DRM device + * @head: list management + * @base: base KMS object + * @name: human readable name, can be overwritten by the driver + * @funcs: control functions, can be NULL for simple managed encoders + * @helper_private: mid-layer private data + * + * CRTCs drive pixels to encoders, which convert them into signals + * appropriate for a given connector or set of connectors. + */ +struct drm_encoder { + struct drm_device *dev; + struct list_head head; + + struct drm_mode_object base; + char *name; + /** + * @encoder_type: + * + * One of the DRM_MODE_ENCODER_<foo> types in drm_mode.h. The following + * encoder types are defined thus far: + * + * - DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A. + * + * - DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort. + * + * - DRM_MODE_ENCODER_LVDS for display panels, or in general any panel + * with a proprietary parallel connector. 
+ * + * - DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, + * Component, SCART). + * + * - DRM_MODE_ENCODER_VIRTUAL for virtual machine displays + * + * - DRM_MODE_ENCODER_DSI for panels connected using the DSI serial bus. + * + * - DRM_MODE_ENCODER_DPI for panels connected using the DPI parallel + * bus. + * + * - DRM_MODE_ENCODER_DPMST for special fake encoders used to allow + * mutliple DP MST streams to share one physical encoder. + */ + int encoder_type; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the encoder. + */ + unsigned index; + + /** + * @possible_crtcs: Bitmask of potential CRTC bindings, using + * drm_crtc_index() as the index into the bitfield. The driver must set + * the bits for all &drm_crtc objects this encoder can be connected to + * before calling drm_dev_register(). + * + * You will get a WARN if you get this wrong in the driver. + * + * Note that since CRTC objects can't be hotplugged the assigned indices + * are stable and hence known before registering all objects. + */ + uint32_t possible_crtcs; + + /** + * @possible_clones: Bitmask of potential sibling encoders for cloning, + * using drm_encoder_index() as the index into the bitfield. The driver + * must set the bits for all &drm_encoder objects which can clone a + * &drm_crtc together with this encoder before calling + * drm_dev_register(). Drivers should set the bit representing the + * encoder itself, too. Cloning bits should be set such that when two + * encoders can be used in a cloned configuration, they both should have + * each another bits set. + * + * As an exception to the above rule if the driver doesn't implement + * any cloning it can leave @possible_clones set to 0. The core will + * automagically fix this up by setting the bit for the encoder itself. + * + * You will get a WARN if you get this wrong in the driver. + * + * Note that since encoder objects can't be hotplugged the assigned indices + * are stable and hence known before registering all objects. + */ + uint32_t possible_clones; + + /** + * @crtc: Currently bound CRTC, only really meaningful for non-atomic + * drivers. Atomic drivers should instead check + * &drm_connector_state.crtc. + */ + struct drm_crtc *crtc; + + /** + * @bridge_chain: Bridges attached to this encoder. Drivers shall not + * access this field directly. 
+ */ + struct list_head bridge_chain; + + const struct drm_encoder_funcs *funcs; + const struct drm_encoder_helper_funcs *helper_private; +}; + +#define obj_to_encoder(x) container_of(x, struct drm_encoder, base) + +__printf(5, 6) +int drm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...); + +__printf(5, 6) +int drmm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...); + +__printf(6, 7) +void *__drmm_encoder_alloc(struct drm_device *dev, + size_t size, size_t offset, + const struct drm_encoder_funcs *funcs, + int encoder_type, + const char *name, ...); + +/** + * drmm_encoder_alloc - Allocate and initialize an encoder + * @dev: drm device + * @type: the type of the struct which contains struct &drm_encoder + * @member: the name of the &drm_encoder within @type + * @funcs: callbacks for this encoder (optional) + * @encoder_type: user visible type of the encoder + * @name: printf style format string for the encoder name, or NULL for default name + * + * Allocates and initializes an encoder. Encoder should be subclassed as part of + * driver encoder objects. Cleanup is automatically handled through registering + * drm_encoder_cleanup() with drmm_add_action(). + * + * The @drm_encoder_funcs.destroy hook must be NULL. + * + * Returns: + * Pointer to new encoder, or ERR_PTR on failure. + */ +#define drmm_encoder_alloc(dev, type, member, funcs, encoder_type, name, ...) \ + ((type *)__drmm_encoder_alloc(dev, sizeof(type), \ + offsetof(type, member), funcs, \ + encoder_type, name, ##__VA_ARGS__)) + +/** + * drmm_plain_encoder_alloc - Allocate and initialize an encoder + * @dev: drm device + * @funcs: callbacks for this encoder (optional) + * @encoder_type: user visible type of the encoder + * @name: printf style format string for the encoder name, or NULL for default name + * + * This is a simplified version of drmm_encoder_alloc(), which only allocates + * and returns a struct drm_encoder instance, with no subclassing. + * + * Returns: + * Pointer to the new drm_encoder struct, or ERR_PTR on failure. + */ +#define drmm_plain_encoder_alloc(dev, funcs, encoder_type, name, ...) \ + ((struct drm_encoder *) \ + __drmm_encoder_alloc(dev, sizeof(struct drm_encoder), \ + 0, funcs, encoder_type, name, ##__VA_ARGS__)) + +/** + * drm_encoder_index - find the index of a registered encoder + * @encoder: encoder to find index for + * + * Given a registered encoder, return the index of that encoder within a DRM + * device's list of encoders. + */ +static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder) +{ + return encoder->index; +} + +/** + * drm_encoder_mask - find the mask of a registered encoder + * @encoder: encoder to find mask for + * + * Given a registered encoder, return the mask bit of that encoder for an + * encoder's possible_clones field. + */ +static inline u32 drm_encoder_mask(const struct drm_encoder *encoder) +{ + return 1 << drm_encoder_index(encoder); +} + +/** + * drm_encoder_crtc_ok - can a given crtc drive a given encoder? + * @encoder: encoder to test + * @crtc: crtc to test + * + * Returns false if @encoder can't be driven by @crtc, true otherwise. 
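+ *
+ * The mask is normally built from drm_crtc_mask() when the encoder is
+ * initialized; a minimal sketch with a hypothetical foo driver (not part
+ * of this header):
+ *
+ * .. code-block:: c
+ *
+ *	encoder->possible_crtcs = drm_crtc_mask(&foo->crtc0) |
+ *				  drm_crtc_mask(&foo->crtc1);
+ *
+ *	WARN_ON(!drm_encoder_crtc_ok(encoder, &foo->crtc0));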
+ */ +static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, + struct drm_crtc *crtc) +{ + return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); +} + +/** + * drm_encoder_find - find a &drm_encoder + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: encoder id + * + * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around + * drm_mode_object_find(). + */ +static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_ENCODER); + + return mo ? obj_to_encoder(mo) : NULL; +} + +void drm_encoder_cleanup(struct drm_encoder *encoder); + +/** + * drm_for_each_encoder_mask - iterate over encoders specified by bitmask + * @encoder: the loop cursor + * @dev: the DRM device + * @encoder_mask: bitmask of encoder indices + * + * Iterate over all encoders specified by bitmask. + */ +#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \ + list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \ + for_each_if ((encoder_mask) & drm_encoder_mask(encoder)) + +/** + * drm_for_each_encoder - iterate over all encoders + * @encoder: the loop cursor + * @dev: the DRM device + * + * Iterate over all encoders of @dev. + */ +#define drm_for_each_encoder(encoder, dev) \ + list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head) + +#endif diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h new file mode 100644 index 000000000..7214101fd --- /dev/null +++ b/include/drm/drm_encoder_slave.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_ENCODER_SLAVE_H__ +#define __DRM_ENCODER_SLAVE_H__ + +#include <linux/i2c.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> + +/** + * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver + * @set_config: Initialize any encoder-specific modesetting parameters. + * The meaning of the @params parameter is implementation + * dependent. It will usually be a structure with DVO port + * data format settings or timings. It's not required for + * the new parameters to take effect until the next mode + * is set. 
+ * + * Most of its members are analogous to the function pointers in + * &drm_encoder_helper_funcs and they can optionally be used to + * initialize the latter. Connector-like methods (e.g. @get_modes and + * @set_property) will typically be wrapped around and only be called + * if the encoder is the currently selected one for the connector. + */ +struct drm_encoder_slave_funcs { + void (*set_config)(struct drm_encoder *encoder, + void *params); + + void (*destroy)(struct drm_encoder *encoder); + void (*dpms)(struct drm_encoder *encoder, int mode); + void (*save)(struct drm_encoder *encoder); + void (*restore)(struct drm_encoder *encoder); + bool (*mode_fixup)(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + int (*mode_valid)(struct drm_encoder *encoder, + struct drm_display_mode *mode); + void (*mode_set)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*get_modes)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*create_resources)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*set_property)(struct drm_encoder *encoder, + struct drm_connector *connector, + struct drm_property *property, + uint64_t val); + +}; + +/** + * struct drm_encoder_slave - Slave encoder struct + * @base: DRM encoder object. + * @slave_funcs: Slave encoder callbacks. + * @slave_priv: Slave encoder private data. + * @bus_priv: Bus specific data. + * + * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the + * ones in @base. The former are never actually called by the common + * CRTC code, it's just a convenience for splitting the encoder + * functions in an upper, GPU-specific layer and a (hopefully) + * GPU-agnostic lower layer: It's the GPU driver responsibility to + * call the slave methods when appropriate. + * + * drm_i2c_encoder_init() provides a way to get an implementation of + * this. + */ +struct drm_encoder_slave { + struct drm_encoder base; + + const struct drm_encoder_slave_funcs *slave_funcs; + void *slave_priv; + void *bus_priv; +}; +#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base) + +int drm_i2c_encoder_init(struct drm_device *dev, + struct drm_encoder_slave *encoder, + struct i2c_adapter *adap, + const struct i2c_board_info *info); + + +/** + * struct drm_i2c_encoder_driver + * + * Describes a device driver for an encoder connected to the GPU + * through an I2C bus. In addition to the entry points in @i2c_driver + * an @encoder_init function should be provided. It will be called to + * give the driver an opportunity to allocate any per-encoder data + * structures and to initialize the @slave_funcs and (optionally) + * @slave_priv members of @encoder. 
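+ *
+ * A minimal sketch of such a driver, with hypothetical foo_* names (not
+ * part of this header):
+ *
+ * .. code-block:: c
+ *
+ *	static int foo_encoder_init(struct i2c_client *client,
+ *				    struct drm_device *dev,
+ *				    struct drm_encoder_slave *encoder)
+ *	{
+ *		encoder->slave_funcs = &foo_slave_funcs;
+ *		encoder->slave_priv = i2c_get_clientdata(client);
+ *		return 0;
+ *	}
+ *
+ *	static struct drm_i2c_encoder_driver foo_i2c_driver = {
+ *		.i2c_driver = {
+ *			.driver = { .name = "foo" },
+ *		},
+ *		.encoder_init = foo_encoder_init,
+ *	};
+ *
+ * which is then registered with drm_i2c_encoder_register() and torn down
+ * with drm_i2c_encoder_unregister().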
+ */ +struct drm_i2c_encoder_driver { + struct i2c_driver i2c_driver; + + int (*encoder_init)(struct i2c_client *client, + struct drm_device *dev, + struct drm_encoder_slave *encoder); + +}; +#define to_drm_i2c_encoder_driver(x) container_of((x), \ + struct drm_i2c_encoder_driver, \ + i2c_driver) + +/** + * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder + */ +static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder) +{ + return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv; +} + +/** + * drm_i2c_encoder_register - Register an I2C encoder driver + * @owner: Module containing the driver. + * @driver: Driver to be registered. + */ +static inline int drm_i2c_encoder_register(struct module *owner, + struct drm_i2c_encoder_driver *driver) +{ + return i2c_register_driver(owner, &driver->i2c_driver); +} + +/** + * drm_i2c_encoder_unregister - Unregister an I2C encoder driver + * @driver: Driver to be unregistered. + */ +static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver) +{ + i2c_del_driver(&driver->i2c_driver); +} + +void drm_i2c_encoder_destroy(struct drm_encoder *encoder); + + +/* + * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: + */ + +void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode); +bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void drm_i2c_encoder_prepare(struct drm_encoder *encoder); +void drm_i2c_encoder_commit(struct drm_encoder *encoder); +void drm_i2c_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder, + struct drm_connector *connector); +void drm_i2c_encoder_save(struct drm_encoder *encoder); +void drm_i2c_encoder_restore(struct drm_encoder *encoder); + + +#endif diff --git a/include/drm/drm_fb_dma_helper.h b/include/drm/drm_fb_dma_helper.h new file mode 100644 index 000000000..d5e036c57 --- /dev/null +++ b/include/drm/drm_fb_dma_helper.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_FB_DMA_HELPER_H__ +#define __DRM_FB_DMA_HELPER_H__ + +#include <linux/types.h> + +struct drm_device; +struct drm_framebuffer; +struct drm_plane_state; + +struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb, + unsigned int plane); + +dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb, + struct drm_plane_state *state, + unsigned int plane); + +void drm_fb_dma_sync_non_coherent(struct drm_device *drm, + struct drm_plane_state *old_state, + struct drm_plane_state *state); + +#endif + diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h new file mode 100644 index 000000000..fddd0d1af --- /dev/null +++ b/include/drm/drm_fb_helper.h @@ -0,0 +1,453 @@ +/* + * Copyright (c) 2006-2009 Red Hat Inc. 
+ * Copyright (c) 2006-2008 Intel Corporation + * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> + * + * DRM framebuffer helper functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + * + * Authors: + * Dave Airlie <airlied@linux.ie> + * Jesse Barnes <jesse.barnes@intel.com> + */ +#ifndef DRM_FB_HELPER_H +#define DRM_FB_HELPER_H + +struct drm_fb_helper; + +#include <drm/drm_client.h> +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <linux/fb.h> +#include <linux/kgdb.h> + +enum mode_set_atomic { + LEAVE_ATOMIC_MODE_SET, + ENTER_ATOMIC_MODE_SET, +}; + +/** + * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size + * @fb_width: fbdev width + * @fb_height: fbdev height + * @surface_width: scanout buffer width + * @surface_height: scanout buffer height + * @surface_bpp: scanout buffer bpp + * @surface_depth: scanout buffer depth + * + * Note that the scanout surface width/height may be larger than the fbdev + * width/height. In case of multiple displays, the scanout surface is sized + * according to the largest width/height (so it is large enough for all CRTCs + * to scanout). But the fbdev width/height is sized to the minimum width/ + * height of all the displays. This ensures that fbcon fits on the smallest + * of the attached displays. fb_width/fb_height is used by + * drm_fb_helper_fill_info() to fill out the &fb_info.var structure. + */ +struct drm_fb_helper_surface_size { + u32 fb_width; + u32 fb_height; + u32 surface_width; + u32 surface_height; + u32 surface_bpp; + u32 surface_depth; +}; + +/** + * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library + * + * Driver callbacks used by the fbdev emulation helper library. + */ +struct drm_fb_helper_funcs { + /** + * @fb_probe: + * + * Driver callback to allocate and initialize the fbdev info structure. + * Furthermore it also needs to allocate the DRM framebuffer used to + * back the fbdev. + * + * This callback is mandatory. + * + * RETURNS: + * + * The driver should return 0 on success and a negative error code on + * failure. 
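+ *
+ * A minimal sketch of how the callback is wired up (foo_fbdev_probe is a
+ * hypothetical implementation, not part of this header):
+ *
+ * .. code-block:: c
+ *
+ *	static const struct drm_fb_helper_funcs foo_fb_helper_funcs = {
+ *		.fb_probe = foo_fbdev_probe,
+ *	};
+ *
+ * Drivers that rely on the generic fbdev emulation set up via
+ * drm_fbdev_generic_setup() do not implement this hook themselves.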
+ */ + int (*fb_probe)(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +}; + +/** + * struct drm_fb_helper - main structure to emulate fbdev on top of KMS + * @fb: Scanout framebuffer object + * @dev: DRM device + * @funcs: driver callbacks for fb helper + * @fbdev: emulated fbdev device info struct + * @pseudo_palette: fake palette of 16 colors + * @damage_clip: clip rectangle used with deferred_io to accumulate damage to + * the screen buffer + * @damage_lock: spinlock protecting @damage_clip + * @damage_work: worker used to flush the framebuffer + * @resume_work: worker used during resume if the console lock is already taken + * + * This is the main structure used by the fbdev helpers. Drivers supporting + * fbdev emulation should embedded this into their overall driver structure. + * Drivers must also fill out a &struct drm_fb_helper_funcs with a few + * operations. + */ +struct drm_fb_helper { + /** + * @client: + * + * DRM client used by the generic fbdev emulation. + */ + struct drm_client_dev client; + + /** + * @buffer: + * + * Framebuffer used by the generic fbdev emulation. + */ + struct drm_client_buffer *buffer; + + struct drm_framebuffer *fb; + struct drm_device *dev; + const struct drm_fb_helper_funcs *funcs; + struct fb_info *fbdev; + u32 pseudo_palette[17]; + struct drm_clip_rect damage_clip; + spinlock_t damage_lock; + struct work_struct damage_work; + struct work_struct resume_work; + + /** + * @lock: + * + * Top-level FBDEV helper lock. This protects all internal data + * structures and lists, such as @connector_info and @crtc_info. + * + * FIXME: fbdev emulation locking is a mess and long term we want to + * protect all helper internal state with this lock as well as reduce + * core KMS locking as much as possible. + */ + struct mutex lock; + + /** + * @kernel_fb_list: + * + * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit. + */ + struct list_head kernel_fb_list; + + /** + * @delayed_hotplug: + * + * A hotplug was received while fbdev wasn't in control of the DRM + * device, i.e. another KMS master was active. The output configuration + * needs to be reprobe when fbdev is in control again. + */ + bool delayed_hotplug; + + /** + * @deferred_setup: + * + * If no outputs are connected (disconnected or unknown) the FB helper + * code will defer setup until at least one of the outputs shows up. + * This field keeps track of the status so that setup can be retried + * at every hotplug event until it succeeds eventually. + * + * Protected by @lock. + */ + bool deferred_setup; + + /** + * @preferred_bpp: + * + * Temporary storage for the driver's preferred BPP setting passed to + * FB helper initialization. This needs to be tracked so that deferred + * FB helper setup can pass this on. + * + * See also: @deferred_setup + */ + int preferred_bpp; +}; + +static inline struct drm_fb_helper * +drm_fb_helper_from_client(struct drm_client_dev *client) +{ + return container_of(client, struct drm_fb_helper, client); +} + +/** + * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers + * + * Helper define to register default implementations of drm_fb_helper + * functions. To be used in struct fb_ops of drm drivers. 
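+ *
+ * A minimal sketch of the intended use (hypothetical driver, not part of
+ * this header):
+ *
+ * .. code-block:: c
+ *
+ *	static const struct fb_ops foo_fb_ops = {
+ *		.owner		= THIS_MODULE,
+ *		DRM_FB_HELPER_DEFAULT_OPS,
+ *		.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+ *		.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+ *		.fb_imageblit	= drm_fb_helper_cfb_imageblit,
+ *	};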
+ */ +#define DRM_FB_HELPER_DEFAULT_OPS \ + .fb_check_var = drm_fb_helper_check_var, \ + .fb_set_par = drm_fb_helper_set_par, \ + .fb_setcmap = drm_fb_helper_setcmap, \ + .fb_blank = drm_fb_helper_blank, \ + .fb_pan_display = drm_fb_helper_pan_display, \ + .fb_debug_enter = drm_fb_helper_debug_enter, \ + .fb_debug_leave = drm_fb_helper_debug_leave, \ + .fb_ioctl = drm_fb_helper_ioctl + +#ifdef CONFIG_DRM_FBDEV_EMULATION +void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs); +int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper); +void drm_fb_helper_fini(struct drm_fb_helper *helper); +int drm_fb_helper_blank(int blank, struct fb_info *info); +int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info); +int drm_fb_helper_set_par(struct fb_info *info); +int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); + +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); + +struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_fill_info(struct fb_info *info, + struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist); + +ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); +ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); + +void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect); +void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area); +void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image); + +void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect); +void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area); +void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image); + +void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend); +void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, + bool suspend); + +int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); + +int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg); + +int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); +int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); +int drm_fb_helper_debug_enter(struct fb_info *info); +int drm_fb_helper_debug_leave(struct fb_info *info); + +void drm_fb_helper_lastclose(struct drm_device *dev); +void drm_fb_helper_output_poll_changed(struct drm_device *dev); + +void drm_fbdev_generic_setup(struct drm_device *dev, + unsigned int preferred_bpp); +#else +static inline void drm_fb_helper_prepare(struct drm_device *dev, + struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs) +{ +} + +static inline int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *helper) +{ + /* So drivers can use it to free the struct */ + helper->dev = dev; + dev->fb_helper = helper; + + return 0; +} + +static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) +{ + if (helper && helper->dev) + helper->dev->fb_helper = NULL; +} + +static inline int drm_fb_helper_blank(int blank, struct fb_info *info) 
+{ + return 0; +} + +static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_set_par(struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static inline int +drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline struct fb_info * +drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) +{ + return NULL; +} + +static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) +{ +} + +static inline void +drm_fb_helper_fill_info(struct fb_info *info, + struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ +} + +static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap, + struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + return 0; +} + +static inline void drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ +} + +static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) +{ + return -ENODEV; +} + +static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, + char __user *buf, size_t count, + loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return -ENODEV; +} + +static inline void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ +} + +static inline void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ +} + +static inline void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image) +{ +} + +static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ +} + +static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ +} + +static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ +} + +static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, + bool suspend) +{ +} + +static inline void +drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend) +{ +} + +static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, + int bpp_sel) +{ + return 0; +} + +static inline int drm_fb_helper_debug_enter(struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_debug_leave(struct fb_info *info) +{ + return 0; +} + +static inline void drm_fb_helper_lastclose(struct drm_device *dev) +{ +} + +static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) +{ +} + +static inline void +drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) +{ +} + +#endif + +#endif diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h new file mode 100644 index 000000000..d780fd151 --- /dev/null +++ b/include/drm/drm_file.h @@ -0,0 +1,424 @@ +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. 
(Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_FILE_H_ +#define _DRM_FILE_H_ + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/idr.h> + +#include <uapi/drm/drm.h> + +#include <drm/drm_prime.h> + +struct dma_fence; +struct drm_file; +struct drm_device; +struct device; +struct file; + +/* + * FIXME: Not sure we want to have drm_minor here in the end, but to avoid + * header include loops we need it here for now. + */ + +/* Note that the order of this enum is ABI (it determines + * /dev/dri/renderD* numbers). + */ +enum drm_minor_type { + DRM_MINOR_PRIMARY, + DRM_MINOR_CONTROL, + DRM_MINOR_RENDER, +}; + +/** + * struct drm_minor - DRM device minor structure + * + * This structure represents a DRM minor number for device nodes in /dev. + * Entirely opaque to drivers and should never be inspected directly by drivers. + * Drivers instead should only interact with &struct drm_file and of course + * &struct drm_device, which is also where driver-private data and resources can + * be attached to. + */ +struct drm_minor { + /* private: */ + int index; /* Minor device number */ + int type; /* Control or render */ + struct device *kdev; /* Linux device */ + struct drm_device *dev; + + struct dentry *debugfs_root; + + struct list_head debugfs_list; + struct mutex debugfs_lock; /* Protects debugfs_list. */ +}; + +/** + * struct drm_pending_event - Event queued up for userspace to read + * + * This represents a DRM event. Drivers can use this as a generic completion + * mechanism, which supports kernel-internal &struct completion, &struct dma_fence + * and also the DRM-specific &struct drm_event delivery mechanism. + */ +struct drm_pending_event { + /** + * @completion: + * + * Optional pointer to a kernel internal completion signalled when + * drm_send_event() is called, useful to internally synchronize with + * nonblocking operations. + */ + struct completion *completion; + + /** + * @completion_release: + * + * Optional callback currently only used by the atomic modeset helpers + * to clean up the reference count for the structure @completion is + * stored in. + */ + void (*completion_release)(struct completion *completion); + + /** + * @event: + * + * Pointer to the actual event that should be sent to userspace to be + * read using drm_read(). 
Can be optional, since nowadays events are + * also used to signal kernel internal threads with @completion or DMA + * transactions using @fence. + */ + struct drm_event *event; + + /** + * @fence: + * + * Optional DMA fence to unblock other hardware transactions which + * depend upon the nonblocking DRM operation this event represents. + */ + struct dma_fence *fence; + + /** + * @file_priv: + * + * &struct drm_file where @event should be delivered to. Only set when + * @event is set. + */ + struct drm_file *file_priv; + + /** + * @link: + * + * Double-linked list to keep track of this event. Can be used by the + * driver up to the point when it calls drm_send_event(), after that + * this list entry is owned by the core for its own book-keeping. + */ + struct list_head link; + + /** + * @pending_link: + * + * Entry on &drm_file.pending_event_list, to keep track of all pending + * events for @file_priv, to allow correct unwinding of them when + * userspace closes the file before the event is delivered. + */ + struct list_head pending_link; +}; + +/** + * struct drm_file - DRM file private data + * + * This structure tracks DRM state per open file descriptor. + */ +struct drm_file { + /** + * @authenticated: + * + * Whether the client is allowed to submit rendering, which for legacy + * nodes means it must be authenticated. + * + * See also the :ref:`section on primary nodes and authentication + * <drm_primary_node>`. + */ + bool authenticated; + + /** + * @stereo_allowed: + * + * True when the client has asked us to expose stereo 3D mode flags. + */ + bool stereo_allowed; + + /** + * @universal_planes: + * + * True if client understands CRTC primary planes and cursor planes + * in the plane list. Automatically set when @atomic is set. + */ + bool universal_planes; + + /** @atomic: True if client understands atomic properties. */ + bool atomic; + + /** + * @aspect_ratio_allowed: + * + * True, if client can handle picture aspect ratios, and has requested + * to pass this information along with the mode. + */ + bool aspect_ratio_allowed; + + /** + * @writeback_connectors: + * + * True if client understands writeback connectors + */ + bool writeback_connectors; + + /** + * @was_master: + * + * This client has or had, master capability. Protected by struct + * &drm_device.master_mutex. + * + * This is used to ensure that CAP_SYS_ADMIN is not enforced, if the + * client is or was master in the past. + */ + bool was_master; + + /** + * @is_master: + * + * This client is the creator of @master. Protected by struct + * &drm_device.master_mutex. + * + * See also the :ref:`section on primary nodes and authentication + * <drm_primary_node>`. + */ + bool is_master; + + /** + * @master: + * + * Master this node is currently associated with. Protected by struct + * &drm_device.master_mutex, and serialized by @master_lookup_lock. + * + * Only relevant if drm_is_primary_client() returns true. Note that + * this only matches &drm_device.master if the master is the currently + * active one. + * + * To update @master, both &drm_device.master_mutex and + * @master_lookup_lock need to be held, therefore holding either of + * them is safe and enough for the read side. + * + * When dereferencing this pointer, either hold struct + * &drm_device.master_mutex for the duration of the pointer's use, or + * use drm_file_get_master() if struct &drm_device.master_mutex is not + * currently held and there is no other need to hold it. This prevents + * @master from being freed during use. 
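A short sketch of the @master access pattern recommended above, for readers following along; it is illustrative only and not part of the patch. drm_file_get_master() and drm_master_put() are assumed to be the declarations from drm_auth.h, which is not shown in this diff.

#include <drm/drm_auth.h>	/* drm_file_get_master(), drm_master_put() */

static void foo_inspect_master(struct drm_file *file_priv)
{
	/* takes a reference so master cannot be freed while we use it; may be NULL */
	struct drm_master *master = drm_file_get_master(file_priv);

	if (!master)
		return;

	/* ... safe to look at *master here without holding master_mutex ... */

	drm_master_put(&master);	/* drop the reference when done */
}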
+ * + * See also @authentication and @is_master and the :ref:`section on + * primary nodes and authentication <drm_primary_node>`. + */ + struct drm_master *master; + + /** @master_lookup_lock: Serializes @master. */ + spinlock_t master_lookup_lock; + + /** @pid: Process that opened this file. */ + struct pid *pid; + + /** @magic: Authentication magic, see @authenticated. */ + drm_magic_t magic; + + /** + * @lhead: + * + * List of all open files of a DRM device, linked into + * &drm_device.filelist. Protected by &drm_device.filelist_mutex. + */ + struct list_head lhead; + + /** @minor: &struct drm_minor for this file. */ + struct drm_minor *minor; + + /** + * @object_idr: + * + * Mapping of mm object handles to object pointers. Used by the GEM + * subsystem. Protected by @table_lock. + */ + struct idr object_idr; + + /** @table_lock: Protects @object_idr. */ + spinlock_t table_lock; + + /** @syncobj_idr: Mapping of sync object handles to object pointers. */ + struct idr syncobj_idr; + /** @syncobj_table_lock: Protects @syncobj_idr. */ + spinlock_t syncobj_table_lock; + + /** @filp: Pointer to the core file structure. */ + struct file *filp; + + /** + * @driver_priv: + * + * Optional pointer for driver private data. Can be allocated in + * &drm_driver.open and should be freed in &drm_driver.postclose. + */ + void *driver_priv; + + /** + * @fbs: + * + * List of &struct drm_framebuffer associated with this file, using the + * &drm_framebuffer.filp_head entry. + * + * Protected by @fbs_lock. Note that the @fbs list holds a reference on + * the framebuffer object to prevent it from untimely disappearing. + */ + struct list_head fbs; + + /** @fbs_lock: Protects @fbs. */ + struct mutex fbs_lock; + + /** + * @blobs: + * + * User-created blob properties; this retains a reference on the + * property. + * + * Protected by @drm_mode_config.blob_lock; + */ + struct list_head blobs; + + /** @event_wait: Waitqueue for new events added to @event_list. */ + wait_queue_head_t event_wait; + + /** + * @pending_event_list: + * + * List of pending &struct drm_pending_event, used to clean up pending + * events in case this file gets closed before the event is signalled. + * Uses the &drm_pending_event.pending_link entry. + * + * Protect by &drm_device.event_lock. + */ + struct list_head pending_event_list; + + /** + * @event_list: + * + * List of &struct drm_pending_event, ready for delivery to userspace + * through drm_read(). Uses the &drm_pending_event.link entry. + * + * Protect by &drm_device.event_lock. + */ + struct list_head event_list; + + /** + * @event_space: + * + * Available event space to prevent userspace from + * exhausting kernel memory. Currently limited to the fairly arbitrary + * value of 4KB. + */ + int event_space; + + /** @event_read_lock: Serializes drm_read(). */ + struct mutex event_read_lock; + + /** + * @prime: + * + * Per-file buffer caches used by the PRIME buffer sharing code. + */ + struct drm_prime_file_private prime; + + /* private: */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) + unsigned long lock_count; /* DRI1 legacy lock count */ +#endif +}; + +/** + * drm_is_primary_client - is this an open file of the primary node + * @file_priv: DRM file + * + * Returns true if this is an open file of the primary node, i.e. + * &drm_file.minor of @file_priv is a primary minor. + * + * See also the :ref:`section on primary nodes and authentication + * <drm_primary_node>`. 
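For orientation, a rough sketch of how the &struct drm_pending_event machinery documented above is typically driven; it is illustrative only, not part of the patch. The foo_* names and FOO_EVENT_DONE value are hypothetical, and drm_event_reserve_init()/drm_send_event() are the declarations that appear further below in this header.

#include <drm/drm_file.h>
#include <linux/slab.h>

#define FOO_EVENT_DONE	0x80000001	/* hypothetical driver-specific event type */

struct foo_done_event {
	struct drm_pending_event base;
	struct drm_event event;		/* what userspace will read via drm_read() */
};

static int foo_queue_done_event(struct drm_device *dev, struct drm_file *file_priv)
{
	struct foo_done_event *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = FOO_EVENT_DONE;
	e->event.length = sizeof(e->event);

	/* reserves space against file_priv->event_space and links up e->base */
	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
	if (ret) {
		kfree(e);
		return ret;
	}

	/* once the operation actually completes: */
	drm_send_event(dev, &e->base);
	return 0;
}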
+ */ +static inline bool drm_is_primary_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_PRIMARY; +} + +/** + * drm_is_render_client - is this an open file of the render node + * @file_priv: DRM file + * + * Returns true if this is an open file of the render node, i.e. + * &drm_file.minor of @file_priv is a render minor. + * + * See also the :ref:`section on render nodes <drm_render_node>`. + */ +static inline bool drm_is_render_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_RENDER; +} + +int drm_open(struct inode *inode, struct file *filp); +ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset); +int drm_release(struct inode *inode, struct file *filp); +int drm_release_noglobal(struct inode *inode, struct file *filp); +__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait); +int drm_event_reserve_init_locked(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +int drm_event_reserve_init(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +void drm_event_cancel_free(struct drm_device *dev, + struct drm_pending_event *p); +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e); +void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); +void drm_send_event_timestamp_locked(struct drm_device *dev, + struct drm_pending_event *e, + ktime_t timestamp); + +struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); + +#endif /* _DRM_FILE_H_ */ diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h new file mode 100644 index 000000000..03cb89069 --- /dev/null +++ b/include/drm/drm_fixed.h @@ -0,0 +1,217 @@ +/* + * Copyright 2009 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Christian König + */ +#ifndef DRM_FIXED_H +#define DRM_FIXED_H + +#include <linux/math64.h> + +typedef union dfixed { + u32 full; +} fixed20_12; + + +#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ +#define dfixed_const_half(A) (u32)(((A) << 12) + 2048) +#define dfixed_const_666(A) (u32)(((A) << 12) + 2731) +#define dfixed_const_8(A) (u32)(((A) << 12) + 3277) +#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) +#define dfixed_init(A) { .full = dfixed_const((A)) } +#define dfixed_init_half(A) { .full = dfixed_const_half((A)) } +#define dfixed_trunc(A) ((A).full >> 12) +#define dfixed_frac(A) ((A).full & ((1 << 12) - 1)) + +static inline u32 dfixed_floor(fixed20_12 A) +{ + u32 non_frac = dfixed_trunc(A); + + return dfixed_const(non_frac); +} + +static inline u32 dfixed_ceil(fixed20_12 A) +{ + u32 non_frac = dfixed_trunc(A); + + if (A.full > dfixed_const(non_frac)) + return dfixed_const(non_frac + 1); + else + return dfixed_const(non_frac); +} + +static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) +{ + u64 tmp = ((u64)A.full << 13); + + do_div(tmp, B.full); + tmp += 1; + tmp /= 2; + return lower_32_bits(tmp); +} + +#define DRM_FIXED_POINT 32 +#define DRM_FIXED_POINT_HALF 16 +#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT) +#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1) +#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK) +#define DRM_FIXED_EPSILON 1LL +#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON) + +static inline s64 drm_int2fixp(int a) +{ + return ((s64)a) << DRM_FIXED_POINT; +} + +static inline int drm_fixp2int(s64 a) +{ + return ((s64)a) >> DRM_FIXED_POINT; +} + +static inline int drm_fixp2int_round(s64 a) +{ + return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1))); +} + +static inline int drm_fixp2int_ceil(s64 a) +{ + if (a > 0) + return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE); + else + return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE); +} + +static inline unsigned drm_fixp_msbset(s64 a) +{ + unsigned shift, sign = (a >> 63) & 1; + + for (shift = 62; shift > 0; --shift) + if (((a >> shift) & 1) != sign) + return shift; + + return 0; +} + +static inline s64 drm_fixp_mul(s64 a, s64 b) +{ + unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b); + s64 result; + + if (shift > 61) { + shift = shift - 61; + a >>= (shift >> 1) + (shift & 1); + b >>= shift >> 1; + } else + shift = 0; + + result = a * b; + + if (shift > DRM_FIXED_POINT) + return result << (shift - DRM_FIXED_POINT); + + if (shift < DRM_FIXED_POINT) + return result >> (DRM_FIXED_POINT - shift); + + return result; +} + +static inline s64 drm_fixp_div(s64 a, s64 b) +{ + unsigned shift = 62 - drm_fixp_msbset(a); + s64 result; + + a <<= shift; + + if (shift < DRM_FIXED_POINT) + b >>= (DRM_FIXED_POINT - shift); + + result = div64_s64(a, b); + + if (shift > DRM_FIXED_POINT) + return result >> (shift - DRM_FIXED_POINT); + + return result; +} + +static inline s64 drm_fixp_from_fraction(s64 a, s64 b) +{ + s64 res; + bool a_neg = a < 0; + bool b_neg = b < 0; + u64 a_abs = a_neg ? -a : a; + u64 b_abs = b_neg ? 
-b : b; + u64 rem; + + /* determine integer part */ + u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem); + + /* determine fractional part */ + { + u32 i = DRM_FIXED_POINT; + + do { + rem <<= 1; + res_abs <<= 1; + if (rem >= b_abs) { + res_abs |= 1; + rem -= b_abs; + } + } while (--i != 0); + } + + /* round up LSB */ + { + u64 summand = (rem << 1) >= b_abs; + + res_abs += summand; + } + + res = (s64) res_abs; + if (a_neg ^ b_neg) + res = -res; + return res; +} + +static inline s64 drm_fixp_exp(s64 x) +{ + s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000); + s64 sum = DRM_FIXED_ONE, term, y = x; + u64 count = 1; + + if (x < 0) + y = -1 * x; + + term = y; + + while (term >= tolerance) { + sum = sum + term; + count = count + 1; + term = drm_fixp_mul(term, div64_s64(y, count)); + } + + if (x < 0) + sum = drm_fixp_div(DRM_FIXED_ONE, sum); + + return sum; +} + +#endif diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h new file mode 100644 index 000000000..21c3d512d --- /dev/null +++ b/include/drm/drm_flip_work.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2013 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_FLIP_WORK_H +#define DRM_FLIP_WORK_H + +#include <linux/kfifo.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +/** + * DOC: flip utils + * + * Util to queue up work to run from work-queue context after flip/vblank. + * Typically this can be used to defer unref of framebuffer's, cursor + * bo's, etc until after vblank. The APIs are all thread-safe. + * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called + * in atomic context. + */ + +struct drm_flip_work; + +/* + * drm_flip_func_t - callback function + * + * @work: the flip work + * @val: value queued via drm_flip_work_queue() + * + * Callback function to be called for each of the queue'd work items after + * drm_flip_work_commit() is called. 
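Stepping back to the 32.32 fixed-point helpers from drm_fixed.h above, a minimal usage sketch (illustrative only, not part of the patch):

#include <drm/drm_fixed.h>

static int foo_scaled_rate(void)
{
	s64 rate   = drm_fixp_from_fraction(5, 2);		/* 2.5 in 32.32 fixed point */
	s64 scaled = drm_fixp_mul(rate, drm_int2fixp(3));	/* 7.5 */

	return drm_fixp2int(scaled);	/* truncates to 7; drm_fixp2int_ceil() would give 8 */
}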
+ */ +typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); + +/** + * struct drm_flip_task - flip work task + * @node: list entry element + * @data: data to pass to &drm_flip_work.func + */ +struct drm_flip_task { + struct list_head node; + void *data; +}; + +/** + * struct drm_flip_work - flip work queue + * @name: debug name + * @func: callback fxn called for each committed item + * @worker: worker which calls @func + * @queued: queued tasks + * @commited: commited tasks + * @lock: lock to access queued and commited lists + */ +struct drm_flip_work { + const char *name; + drm_flip_func_t func; + struct work_struct worker; + struct list_head queued; + struct list_head commited; + spinlock_t lock; +}; + +struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags); +void drm_flip_work_queue_task(struct drm_flip_work *work, + struct drm_flip_task *task); +void drm_flip_work_queue(struct drm_flip_work *work, void *val); +void drm_flip_work_commit(struct drm_flip_work *work, + struct workqueue_struct *wq); +void drm_flip_work_init(struct drm_flip_work *work, + const char *name, drm_flip_func_t func); +void drm_flip_work_cleanup(struct drm_flip_work *work); + +#endif /* DRM_FLIP_WORK_H */ diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h new file mode 100644 index 000000000..eb5c98cf8 --- /dev/null +++ b/include/drm/drm_format_helper.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2016 Noralf Trønnes + */ + +#ifndef __LINUX_DRM_FORMAT_HELPER_H +#define __LINUX_DRM_FORMAT_HELPER_H + +#include <linux/types.h> + +struct drm_device; +struct drm_format_info; +struct drm_framebuffer; +struct drm_rect; + +struct iosys_map; + +unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format, + const struct drm_rect *clip); + +void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip); +void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, bool cached); +void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip); +void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, bool swab); +void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip); +void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip); +void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip); + +int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *rect); + +void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip); + +size_t 
drm_fb_build_fourcc_list(struct drm_device *dev, + const u32 *native_fourccs, size_t native_nfourccs, + const u32 *extra_fourccs, size_t extra_nfourccs, + u32 *fourccs_out, size_t nfourccs_out); + +#endif /* __LINUX_DRM_FORMAT_HELPER_H */ diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h new file mode 100644 index 000000000..532ae78ca --- /dev/null +++ b/include/drm/drm_fourcc.h @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ +#ifndef __DRM_FOURCC_H__ +#define __DRM_FOURCC_H__ + +#include <linux/types.h> +#include <uapi/drm/drm_fourcc.h> + +/** + * DRM_FORMAT_MAX_PLANES - maximum number of planes a DRM format can have + */ +#define DRM_FORMAT_MAX_PLANES 4u + +/* + * DRM formats are little endian. Define host endian variants for the + * most common formats here, to reduce the #ifdefs needed in drivers. + * + * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in + * case the format can't be specified otherwise, so we don't end up + * with two values describing the same format. + */ +#ifdef __BIG_ENDIAN +# define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \ + DRM_FORMAT_BIG_ENDIAN) +# define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \ + DRM_FORMAT_BIG_ENDIAN) +# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888 +# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888 +#else +# define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555 +# define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565 +# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888 +# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888 +#endif + +struct drm_device; +struct drm_mode_fb_cmd2; + +/** + * struct drm_format_info - information about a DRM format + */ +struct drm_format_info { + /** @format: 4CC format identifier (DRM_FORMAT_*) */ + u32 format; + + /** + * @depth: + * + * Color depth (number of bits per pixel excluding padding bits), + * valid for a subset of RGB formats only. This is a legacy field, do + * not use in new code and set to 0 for new formats. + */ + u8 depth; + + /** @num_planes: Number of color planes (1 to 3) */ + u8 num_planes; + + union { + /** + * @cpp: + * + * Number of bytes per pixel (per plane), this is aliased with + * @char_per_block. It is deprecated in favour of using the + * triplet @char_per_block, @block_w, @block_h for better + * describing the pixel format. 
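Returning briefly to the conversion helpers of drm_format_helper.h above: a driver that keeps an XRGB8888 shadow framebuffer and an RGB565 device buffer might call them roughly as below. This is an illustrative sketch, not part of the patch; dst_vaddr, src, and fb are assumed to come from the driver, and iosys_map_set_vaddr()/DRM_RECT_INIT() are assumed from linux/iosys-map.h and drm_rect.h.

#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>
#include <linux/iosys-map.h>

static void foo_copy_to_rgb565(void *dst_vaddr, const struct iosys_map *src,
			       const struct drm_framebuffer *fb)
{
	struct iosys_map dst;
	struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);

	iosys_map_set_vaddr(&dst, dst_vaddr);

	/* XRGB8888 -> RGB565, default destination pitch, no byte swapping */
	drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, &clip, false);
}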
+ */ + u8 cpp[DRM_FORMAT_MAX_PLANES]; + + /** + * @char_per_block: + * + * Number of bytes per block (per plane), where blocks are + * defined as a rectangle of pixels which are stored next to + * each other in a byte aligned memory region. Together with + * @block_w and @block_h this is used to properly describe tiles + * in tiled formats or to describe groups of pixels in packed + * formats for which the memory needed for a single pixel is not + * byte aligned. + * + * @cpp has been kept for historical reasons because there are + * a lot of places in drivers where it's used. In drm core for + * generic code paths the preferred way is to use + * @char_per_block, drm_format_info_block_width() and + * drm_format_info_block_height() which allows handling both + * block and non-block formats in the same way. + * + * For formats that are intended to be used only with non-linear + * modifiers both @cpp and @char_per_block must be 0 in the + * generic format table. Drivers could supply accurate + * information from their drm_mode_config.get_format_info hook + * if they want the core to be validating the pitch. + */ + u8 char_per_block[DRM_FORMAT_MAX_PLANES]; + }; + + /** + * @block_w: + * + * Block width in pixels, this is intended to be accessed through + * drm_format_info_block_width() + */ + u8 block_w[DRM_FORMAT_MAX_PLANES]; + + /** + * @block_h: + * + * Block height in pixels, this is intended to be accessed through + * drm_format_info_block_height() + */ + u8 block_h[DRM_FORMAT_MAX_PLANES]; + + /** @hsub: Horizontal chroma subsampling factor */ + u8 hsub; + /** @vsub: Vertical chroma subsampling factor */ + u8 vsub; + + /** @has_alpha: Does the format embeds an alpha component? */ + bool has_alpha; + + /** @is_yuv: Is it a YUV format? */ + bool is_yuv; + + /** @is_color_indexed: Is it a color-indexed format? */ + bool is_color_indexed; +}; + +/** + * drm_format_info_is_yuv_packed - check that the format info matches a YUV + * format with data laid in a single plane + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a packed YUV format. + */ +static inline bool +drm_format_info_is_yuv_packed(const struct drm_format_info *info) +{ + return info->is_yuv && info->num_planes == 1; +} + +/** + * drm_format_info_is_yuv_semiplanar - check that the format info matches a YUV + * format with data laid in two planes (luminance and chrominance) + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a semiplanar YUV format. + */ +static inline bool +drm_format_info_is_yuv_semiplanar(const struct drm_format_info *info) +{ + return info->is_yuv && info->num_planes == 2; +} + +/** + * drm_format_info_is_yuv_planar - check that the format info matches a YUV + * format with data laid in three planes (one for each YUV component) + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a planar YUV format. + */ +static inline bool +drm_format_info_is_yuv_planar(const struct drm_format_info *info) +{ + return info->is_yuv && info->num_planes == 3; +} + +/** + * drm_format_info_is_yuv_sampling_410 - check that the format info matches a + * YUV format with 4:1:0 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:1:0 + * sub-sampling. 
+ */ +static inline bool +drm_format_info_is_yuv_sampling_410(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 4 && info->vsub == 4; +} + +/** + * drm_format_info_is_yuv_sampling_411 - check that the format info matches a + * YUV format with 4:1:1 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:1:1 + * sub-sampling. + */ +static inline bool +drm_format_info_is_yuv_sampling_411(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 4 && info->vsub == 1; +} + +/** + * drm_format_info_is_yuv_sampling_420 - check that the format info matches a + * YUV format with 4:2:0 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:2:0 + * sub-sampling. + */ +static inline bool +drm_format_info_is_yuv_sampling_420(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 2 && info->vsub == 2; +} + +/** + * drm_format_info_is_yuv_sampling_422 - check that the format info matches a + * YUV format with 4:2:2 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:2:2 + * sub-sampling. + */ +static inline bool +drm_format_info_is_yuv_sampling_422(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 2 && info->vsub == 1; +} + +/** + * drm_format_info_is_yuv_sampling_444 - check that the format info matches a + * YUV format with 4:4:4 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:4:4 + * sub-sampling. + */ +static inline bool +drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 1 && info->vsub == 1; +} + +/** + * drm_format_info_plane_width - width of the plane given the first plane + * @info: pixel format info + * @width: width of the first plane + * @plane: plane index + * + * Returns: + * The width of @plane, given that the width of the first plane is @width. + */ +static inline +int drm_format_info_plane_width(const struct drm_format_info *info, int width, + int plane) +{ + if (!info || plane >= info->num_planes) + return 0; + + if (plane == 0) + return width; + + return width / info->hsub; +} + +/** + * drm_format_info_plane_height - height of the plane given the first plane + * @info: pixel format info + * @height: height of the first plane + * @plane: plane index + * + * Returns: + * The height of @plane, given that the height of the first plane is @height. 
+ */ +static inline +int drm_format_info_plane_height(const struct drm_format_info *info, int height, + int plane) +{ + if (!info || plane >= info->num_planes) + return 0; + + if (plane == 0) + return height; + + return height / info->vsub; +} + +const struct drm_format_info *__drm_format_info(u32 format); +const struct drm_format_info *drm_format_info(u32 format); +const struct drm_format_info * +drm_get_format_info(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd); +uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); +uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, + uint32_t bpp, uint32_t depth); +unsigned int drm_format_info_block_width(const struct drm_format_info *info, + int plane); +unsigned int drm_format_info_block_height(const struct drm_format_info *info, + int plane); +unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane); +uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, + int plane, unsigned int buffer_width); + +#endif /* __DRM_FOURCC_H__ */ diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h new file mode 100644 index 000000000..0dcc07b68 --- /dev/null +++ b/include/drm/drm_framebuffer.h @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_FRAMEBUFFER_H__ +#define __DRM_FRAMEBUFFER_H__ + +#include <linux/ctype.h> +#include <linux/list.h> +#include <linux/sched.h> + +#include <drm/drm_fourcc.h> +#include <drm/drm_mode_object.h> + +struct drm_clip_rect; +struct drm_device; +struct drm_file; +struct drm_framebuffer; +struct drm_gem_object; + +/** + * struct drm_framebuffer_funcs - framebuffer hooks + */ +struct drm_framebuffer_funcs { + /** + * @destroy: + * + * Clean up framebuffer resources, specifically also unreference the + * backing storage. The core guarantees to call this function for every + * framebuffer successfully created by calling + * &drm_mode_config_funcs.fb_create. Drivers must also call + * drm_framebuffer_cleanup() to release DRM core resources for this + * framebuffer. + */ + void (*destroy)(struct drm_framebuffer *framebuffer); + + /** + * @create_handle: + * + * Create a buffer handle in the driver-specific buffer manager (either + * GEM or TTM) valid for the passed-in &struct drm_file. 
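As a quick illustration of the format-description helpers above (not part of the patch; the 1920x1080 numbers are arbitrary), querying the generic format table for NV12 might look like this:

#include <drm/drm_fourcc.h>

static void foo_dump_nv12_layout(void)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	if (drm_format_info_is_yuv_semiplanar(info)) {
		/* NV12 is 4:2:0, so the chroma plane is halved in both axes */
		int chroma_w = drm_format_info_plane_width(info, 1920, 1);	/* 960 */
		int chroma_h = drm_format_info_plane_height(info, 1080, 1);	/* 540 */

		(void)chroma_w;
		(void)chroma_h;
	}
}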
This is used by + * the core to implement the GETFB IOCTL, which returns (for + * sufficiently priviledged user) also a native buffer handle. This can + * be used for seamless transitions between modesetting clients by + * copying the current screen contents to a private buffer and blending + * between that and the new contents. + * + * GEM based drivers should call drm_gem_handle_create() to create the + * handle. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*create_handle)(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle); + /** + * @dirty: + * + * Optional callback for the dirty fb IOCTL. + * + * Userspace can notify the driver via this callback that an area of the + * framebuffer has changed and should be flushed to the display + * hardware. This can also be used internally, e.g. by the fbdev + * emulation, though that's not the case currently. + * + * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd + * for more information as all the semantics and arguments have a one to + * one mapping on this function. + * + * Atomic drivers should use drm_atomic_helper_dirtyfb() to implement + * this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*dirty)(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, + unsigned color, struct drm_clip_rect *clips, + unsigned num_clips); +}; + +/** + * struct drm_framebuffer - frame buffer object + * + * Note that the fb is refcounted for the benefit of driver internals, + * for example some hw, disabling a CRTC/plane is asynchronous, and + * scanout does not actually complete until the next vblank. So some + * cleanup (like releasing the reference(s) on the backing GEM bo(s)) + * should be deferred. In cases like this, the driver would like to + * hold a ref to the fb even though it has already been removed from + * userspace perspective. See drm_framebuffer_get() and + * drm_framebuffer_put(). + * + * The refcount is stored inside the mode object @base. + */ +struct drm_framebuffer { + /** + * @dev: DRM device this framebuffer belongs to + */ + struct drm_device *dev; + /** + * @head: Place on the &drm_mode_config.fb_list, access protected by + * &drm_mode_config.fb_lock. + */ + struct list_head head; + + /** + * @base: base modeset object structure, contains the reference count. + */ + struct drm_mode_object base; + + /** + * @comm: Name of the process allocating the fb, used for fb dumping. + */ + char comm[TASK_COMM_LEN]; + + /** + * @format: framebuffer format information + */ + const struct drm_format_info *format; + /** + * @funcs: framebuffer vfunc table + */ + const struct drm_framebuffer_funcs *funcs; + /** + * @pitches: Line stride per buffer. For userspace created object this + * is copied from drm_mode_fb_cmd2. + */ + unsigned int pitches[DRM_FORMAT_MAX_PLANES]; + /** + * @offsets: Offset from buffer start to the actual pixel data in bytes, + * per buffer. For userspace created object this is copied from + * drm_mode_fb_cmd2. + * + * Note that this is a linear offset and does not take into account + * tiling or buffer layout per @modifier. It is meant to be used when + * the actual pixel data for this framebuffer plane starts at an offset, + * e.g. when multiple planes are allocated within the same backing + * storage buffer object. For tiled layouts this generally means its + * @offsets must at least be tile-size aligned, but hardware often has + * stricter requirements. 
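For the &drm_framebuffer_funcs hooks described above, an atomic, GEM-based driver commonly forwards straight to existing helpers; a sketch follows (illustrative only, not part of the patch; drm_atomic_helper_dirtyfb(), drm_gem_fb_destroy() and drm_gem_fb_create_handle() are assumed from drm_damage_helper.h and drm_gem_framebuffer_helper.h, which are not shown in this hunk).

#include <drm/drm_damage_helper.h>		/* drm_atomic_helper_dirtyfb() */
#include <drm/drm_gem_framebuffer_helper.h>	/* drm_gem_fb_destroy(), drm_gem_fb_create_handle() */

static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
	.dirty		= drm_atomic_helper_dirtyfb,
};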
+ * + * This should not be used to specifiy x/y pixel offsets into the buffer + * data (even for linear buffers). Specifying an x/y pixel offset is + * instead done through the source rectangle in &struct drm_plane_state. + */ + unsigned int offsets[DRM_FORMAT_MAX_PLANES]; + /** + * @modifier: Data layout modifier. This is used to describe + * tiling, or also special layouts (like compression) of auxiliary + * buffers. For userspace created object this is copied from + * drm_mode_fb_cmd2. + */ + uint64_t modifier; + /** + * @width: Logical width of the visible area of the framebuffer, in + * pixels. + */ + unsigned int width; + /** + * @height: Logical height of the visible area of the framebuffer, in + * pixels. + */ + unsigned int height; + /** + * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or + * DRM_MODE_FB_MODIFIERS. + */ + int flags; + /** + * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor + * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR + * universal plane. + */ + int hot_x; + /** + * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor + * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR + * universal plane. + */ + int hot_y; + /** + * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. + */ + struct list_head filp_head; + /** + * @obj: GEM objects backing the framebuffer, one per plane (optional). + * + * This is used by the GEM framebuffer helpers, see e.g. + * drm_gem_fb_create(). + */ + struct drm_gem_object *obj[DRM_FORMAT_MAX_PLANES]; +}; + +#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) + +int drm_framebuffer_init(struct drm_device *dev, + struct drm_framebuffer *fb, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id); +void drm_framebuffer_remove(struct drm_framebuffer *fb); +void drm_framebuffer_cleanup(struct drm_framebuffer *fb); +void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); + +/** + * drm_framebuffer_get - acquire a framebuffer reference + * @fb: DRM framebuffer + * + * This function increments the framebuffer's reference count. + */ +static inline void drm_framebuffer_get(struct drm_framebuffer *fb) +{ + drm_mode_object_get(&fb->base); +} + +/** + * drm_framebuffer_put - release a framebuffer reference + * @fb: DRM framebuffer + * + * This function decrements the framebuffer's reference count and frees the + * framebuffer if the reference count drops to zero. + */ +static inline void drm_framebuffer_put(struct drm_framebuffer *fb) +{ + drm_mode_object_put(&fb->base); +} + +/** + * drm_framebuffer_read_refcount - read the framebuffer reference count. + * @fb: framebuffer + * + * This functions returns the framebuffer's reference count. + */ +static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb) +{ + return kref_read(&fb->base.refcount); +} + +/** + * drm_framebuffer_assign - store a reference to the fb + * @p: location to store framebuffer + * @fb: new framebuffer (maybe NULL) + * + * This functions sets the location to store a reference to the framebuffer, + * unreferencing the framebuffer that was previously stored in that location. 
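A tiny sketch of the assign helper documented above and defined just below (illustrative only; struct foo_device and its scanout_fb member are hypothetical driver-private state):

static void foo_set_scanout_fb(struct foo_device *foo, struct drm_framebuffer *new_fb)
{
	/* takes a reference on new_fb (if any) and drops the one previously stored */
	drm_framebuffer_assign(&foo->scanout_fb, new_fb);
}

Passing NULL as the new framebuffer releases whatever was stored, which is the usual teardown pattern.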
+ */ +static inline void drm_framebuffer_assign(struct drm_framebuffer **p, + struct drm_framebuffer *fb) +{ + if (fb) + drm_framebuffer_get(fb); + if (*p) + drm_framebuffer_put(*p); + *p = fb; +} + +/* + * drm_for_each_fb - iterate over all framebuffers + * @fb: the loop cursor + * @dev: the DRM device + * + * Iterate over all framebuffers of @dev. User must hold + * &drm_mode_config.fb_lock. + */ +#define drm_for_each_fb(fb, dev) \ + for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \ + fb = list_first_entry(&(dev)->mode_config.fb_list, \ + struct drm_framebuffer, head); \ + &fb->head != (&(dev)->mode_config.fb_list); \ + fb = list_next_entry(fb, head)) + +int drm_framebuffer_plane_width(int width, + const struct drm_framebuffer *fb, int plane); +int drm_framebuffer_plane_height(int height, + const struct drm_framebuffer *fb, int plane); + +/** + * struct drm_afbc_framebuffer - a special afbc frame buffer object + * + * A derived class of struct drm_framebuffer, dedicated for afbc use cases. + */ +struct drm_afbc_framebuffer { + /** + * @base: base framebuffer structure. + */ + struct drm_framebuffer base; + /** + * @block_width: width of a single afbc block + */ + u32 block_width; + /** + * @block_height: height of a single afbc block + */ + u32 block_height; + /** + * @aligned_width: aligned frame buffer width + */ + u32 aligned_width; + /** + * @aligned_height: aligned frame buffer height + */ + u32 aligned_height; + /** + * @offset: offset of the first afbc header + */ + u32 offset; + /** + * @afbc_size: minimum size of afbc buffer + */ + u32 afbc_size; +}; + +#define fb_to_afbc_fb(x) container_of(x, struct drm_afbc_framebuffer, base) + +#endif diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h new file mode 100644 index 000000000..60b2dda8d --- /dev/null +++ b/include/drm/drm_gem.h @@ -0,0 +1,480 @@ +#ifndef __DRM_GEM_H__ +#define __DRM_GEM_H__ + +/* + * GEM Graphics Execution Manager Driver Interfaces + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * Copyright © 2014 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include <linux/kref.h> +#include <linux/dma-resv.h> + +#include <drm/drm_vma_manager.h> + +struct iosys_map; +struct drm_gem_object; + +/** + * struct drm_gem_object_funcs - GEM object functions + */ +struct drm_gem_object_funcs { + /** + * @free: + * + * Deconstructor for drm_gem_objects. + * + * This callback is mandatory. + */ + void (*free)(struct drm_gem_object *obj); + + /** + * @open: + * + * Called upon GEM handle creation. + * + * This callback is optional. + */ + int (*open)(struct drm_gem_object *obj, struct drm_file *file); + + /** + * @close: + * + * Called upon GEM handle release. + * + * This callback is optional. + */ + void (*close)(struct drm_gem_object *obj, struct drm_file *file); + + /** + * @print_info: + * + * If driver subclasses struct &drm_gem_object, it can implement this + * optional hook for printing additional driver specific info. + * + * drm_printf_indent() should be used in the callback passing it the + * indent argument. + * + * This callback is called from drm_gem_print_info(). + * + * This callback is optional. + */ + void (*print_info)(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + + /** + * @export: + * + * Export backing buffer as a &dma_buf. + * If this is not set drm_gem_prime_export() is used. + * + * This callback is optional. + */ + struct dma_buf *(*export)(struct drm_gem_object *obj, int flags); + + /** + * @pin: + * + * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper. + * + * This callback is optional. + */ + int (*pin)(struct drm_gem_object *obj); + + /** + * @unpin: + * + * Unpin backing buffer. Used by the drm_gem_map_detach() helper. + * + * This callback is optional. + */ + void (*unpin)(struct drm_gem_object *obj); + + /** + * @get_sg_table: + * + * Returns a Scatter-Gather table representation of the buffer. + * Used when exporting a buffer by the drm_gem_map_dma_buf() helper. + * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table() + * in drm_gem_unmap_buf(), therefore these helpers and this callback + * here cannot be used for sg tables pointing at driver private memory + * ranges. + * + * See also drm_prime_pages_to_sg(). + */ + struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); + + /** + * @vmap: + * + * Returns a virtual address for the buffer. Used by the + * drm_gem_dmabuf_vmap() helper. + * + * This callback is optional. + */ + int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map); + + /** + * @vunmap: + * + * Releases the address previously returned by @vmap. Used by the + * drm_gem_dmabuf_vunmap() helper. + * + * This callback is optional. + */ + void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map); + + /** + * @mmap: + * + * Handle mmap() of the gem object, setup vma accordingly. + * + * This callback is optional. + * + * The callback is used by both drm_gem_mmap_obj() and + * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not + * used, the @mmap callback must set vma->vm_ops instead. + */ + int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); + + /** + * @vm_ops: + * + * Virtual memory operations used with mmap. + * + * This is optional but necessary for mmap support. + */ + const struct vm_operations_struct *vm_ops; +}; + +/** + * struct drm_gem_lru - A simple LRU helper + * + * A helper for tracking GEM objects in a given state, to aid in + * driver's shrinker implementation. 
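To tie the object callbacks above together, a driver's function table is usually a simple static initializer along these lines (illustrative sketch, not part of the patch; every foo_* callback is a hypothetical driver implementation whose declarations are omitted here):

#include <drm/drm_gem.h>

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free		= foo_gem_free,			/* mandatory */
	.print_info	= foo_gem_print_info,		/* optional debugfs output */
	.get_sg_table	= foo_gem_get_sg_table,
	.vmap		= foo_gem_vmap,
	.vunmap		= foo_gem_vunmap,
	.mmap		= foo_gem_mmap,			/* when set, @vm_ops is not consulted */
};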
Tracks the count of pages + * for lockless &shrinker.count_objects, and provides + * &drm_gem_lru_scan for driver's &shrinker.scan_objects + * implementation. + */ +struct drm_gem_lru { + /** + * @lock: + * + * Lock protecting movement of GEM objects between LRUs. All + * LRUs that the object can move between should be protected + * by the same lock. + */ + struct mutex *lock; + + /** + * @count: + * + * The total number of backing pages of the GEM objects in + * this LRU. + */ + long count; + + /** + * @list: + * + * The LRU list. + */ + struct list_head list; +}; + +/** + * struct drm_gem_object - GEM buffer object + * + * This structure defines the generic parts for GEM buffer objects, which are + * mostly around handling mmap and userspace handles. + * + * Buffer objects are often abbreviated to BO. + */ +struct drm_gem_object { + /** + * @refcount: + * + * Reference count of this object + * + * Please use drm_gem_object_get() to acquire and drm_gem_object_put_locked() + * or drm_gem_object_put() to release a reference to a GEM + * buffer object. + */ + struct kref refcount; + + /** + * @handle_count: + * + * This is the GEM file_priv handle count of this object. + * + * Each handle also holds a reference. Note that when the handle_count + * drops to 0 any global names (e.g. the id in the flink namespace) will + * be cleared. + * + * Protected by &drm_device.object_name_lock. + */ + unsigned handle_count; + + /** + * @dev: DRM dev this object belongs to. + */ + struct drm_device *dev; + + /** + * @filp: + * + * SHMEM file node used as backing storage for swappable buffer objects. + * GEM also supports driver private objects with driver-specific backing + * storage (contiguous DMA memory, special reserved blocks). In this + * case @filp is NULL. + */ + struct file *filp; + + /** + * @vma_node: + * + * Mapping info for this object to support mmap. Drivers are supposed to + * allocate the mmap offset using drm_gem_create_mmap_offset(). The + * offset itself can be retrieved using drm_vma_node_offset_addr(). + * + * Memory mapping itself is handled by drm_gem_mmap(), which also checks + * that userspace is allowed to access the object. + */ + struct drm_vma_offset_node vma_node; + + /** + * @size: + * + * Size of the object, in bytes. Immutable over the object's + * lifetime. + */ + size_t size; + + /** + * @name: + * + * Global name for this object, starts at 1. 0 means unnamed. + * Access is covered by &drm_device.object_name_lock. This is used by + * the GEM_FLINK and GEM_OPEN ioctls. + */ + int name; + + /** + * @dma_buf: + * + * dma-buf associated with this GEM object. + * + * Pointer to the dma-buf associated with this gem object (either + * through importing or exporting). We break the resulting reference + * loop when the last gem handle for this object is released. + * + * Protected by &drm_device.object_name_lock. + */ + struct dma_buf *dma_buf; + + /** + * @import_attach: + * + * dma-buf attachment backing this object. + * + * Any foreign dma_buf imported as a gem object has this set to the + * attachment point for the device. This is invariant over the lifetime + * of a gem object. + * + * The &drm_gem_object_funcs.free callback is responsible for + * cleaning up the dma_buf attachment and references acquired at import + * time. + * + * Note that the drm gem/prime core does not depend upon drivers setting + * this field any more. So for drivers where this doesn't make sense + * (e.g. virtual devices or a displaylink behind an usb bus) they can + * simply leave it as NULL. 
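+ *
+ * For illustration (an editor's sketch; struct my_bo and
+ * my_free_backing_pages() are placeholders, and drm_prime_gem_destroy() is
+ * declared in drm_prime.h), a &drm_gem_object_funcs.free callback that does
+ * set this field typically branches on it during teardown::
+ *
+ *	struct my_bo {
+ *		struct drm_gem_object base;
+ *		struct sg_table *sgt;
+ *	};
+ *
+ *	static void my_gem_free(struct drm_gem_object *obj)
+ *	{
+ *		struct my_bo *bo = container_of(obj, struct my_bo, base);
+ *
+ *		if (obj->import_attach)
+ *			drm_prime_gem_destroy(obj, bo->sgt);
+ *		else
+ *			my_free_backing_pages(bo);
+ *
+ *		drm_gem_object_release(obj);
+ *		kfree(bo);
+ *	}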
+ */ + struct dma_buf_attachment *import_attach; + + /** + * @resv: + * + * Pointer to reservation object associated with the this GEM object. + * + * Normally (@resv == &@_resv) except for imported GEM objects. + */ + struct dma_resv *resv; + + /** + * @_resv: + * + * A reservation object for this GEM object. + * + * This is unused for imported GEM objects. + */ + struct dma_resv _resv; + + /** + * @funcs: + * + * Optional GEM object functions. If this is set, it will be used instead of the + * corresponding &drm_driver GEM callbacks. + * + * New drivers should use this. + * + */ + const struct drm_gem_object_funcs *funcs; + + /** + * @lru_node: + * + * List node in a &drm_gem_lru. + */ + struct list_head lru_node; + + /** + * @lru: + * + * The current LRU list that the GEM object is on. + */ + struct drm_gem_lru *lru; +}; + +/** + * DRM_GEM_FOPS - Default drm GEM file operations + * + * This macro provides a shorthand for setting the GEM file ops in the + * &file_operations structure. If all you need are the default ops, use + * DEFINE_DRM_GEM_FOPS instead. + */ +#define DRM_GEM_FOPS \ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_mmap + +/** + * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers + * @name: name for the generated structure + * + * This macro autogenerates a suitable &struct file_operations for GEM based + * drivers, which can be assigned to &drm_driver.fops. Note that this structure + * cannot be shared between drivers, because it contains a reference to the + * current module using THIS_MODULE. + * + * Note that the declaration is already marked as static - if you need a + * non-static version of this you're probably doing it wrong and will break the + * THIS_MODULE reference by accident. + */ +#define DEFINE_DRM_GEM_FOPS(name) \ + static const struct file_operations name = {\ + .owner = THIS_MODULE,\ + DRM_GEM_FOPS,\ + } + +void drm_gem_object_release(struct drm_gem_object *obj); +void drm_gem_object_free(struct kref *kref); +int drm_gem_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size); +void drm_gem_private_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size); +void drm_gem_vm_open(struct vm_area_struct *vma); +void drm_gem_vm_close(struct vm_area_struct *vma); +int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, + struct vm_area_struct *vma); +int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); + +/** + * drm_gem_object_get - acquire a GEM buffer object reference + * @obj: GEM buffer object + * + * This function acquires an additional reference to @obj. It is illegal to + * call this without already holding a reference. No locks required. + */ +static inline void drm_gem_object_get(struct drm_gem_object *obj) +{ + kref_get(&obj->refcount); +} + +__attribute__((nonnull)) +static inline void +__drm_gem_object_put(struct drm_gem_object *obj) +{ + kref_put(&obj->refcount, drm_gem_object_free); +} + +/** + * drm_gem_object_put - drop a GEM buffer object reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. 
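+ *
+ * A minimal lookup/use/release sketch (editor's illustration; the
+ * my_bo_size() helper name is a placeholder)::
+ *
+ *	static int my_bo_size(struct drm_file *file, u32 handle, size_t *size)
+ *	{
+ *		struct drm_gem_object *obj;
+ *
+ *		obj = drm_gem_object_lookup(file, handle);
+ *		if (!obj)
+ *			return -ENOENT;
+ *
+ *		*size = obj->size;
+ *		drm_gem_object_put(obj);
+ *		return 0;
+ *	}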
+ */ +static inline void +drm_gem_object_put(struct drm_gem_object *obj) +{ + if (obj) + __drm_gem_object_put(obj); +} + +int drm_gem_handle_create(struct drm_file *file_priv, + struct drm_gem_object *obj, + u32 *handlep); +int drm_gem_handle_delete(struct drm_file *filp, u32 handle); + + +void drm_gem_free_mmap_offset(struct drm_gem_object *obj); +int drm_gem_create_mmap_offset(struct drm_gem_object *obj); +int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); + +struct page **drm_gem_get_pages(struct drm_gem_object *obj); +void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, + bool dirty, bool accessed); + +int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, + int count, struct drm_gem_object ***objs_out); +struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); +long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, + bool wait_all, unsigned long timeout); +int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx); +void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx); +int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + u32 handle, u64 *offset); + +void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock); +void drm_gem_lru_remove(struct drm_gem_object *obj); +void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj); +unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, + unsigned int nr_to_scan, + unsigned long *remaining, + bool (*shrink)(struct drm_gem_object *obj)); + +#endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h new file mode 100644 index 000000000..6e3319e90 --- /dev/null +++ b/include/drm/drm_gem_atomic_helper.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef __DRM_GEM_ATOMIC_HELPER_H__ +#define __DRM_GEM_ATOMIC_HELPER_H__ + +#include <linux/iosys-map.h> + +#include <drm/drm_fourcc.h> +#include <drm/drm_plane.h> + +struct drm_simple_display_pipe; + +/* + * Plane Helpers + */ + +int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state); +int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + +/* + * Helpers for planes with shadow buffers + */ + +/** + * DRM_SHADOW_PLANE_MAX_WIDTH - Maximum width of a plane's shadow buffer in pixels + * + * For drivers with shadow planes, the maximum width of the framebuffer is + * usually independent from hardware limitations. Drivers can initialize struct + * drm_mode_config.max_width from DRM_SHADOW_PLANE_MAX_WIDTH. + */ +#define DRM_SHADOW_PLANE_MAX_WIDTH (4096u) + +/** + * DRM_SHADOW_PLANE_MAX_HEIGHT - Maximum height of a plane's shadow buffer in scanlines + * + * For drivers with shadow planes, the maximum height of the framebuffer is + * usually independent from hardware limitations. Drivers can initialize struct + * drm_mode_config.max_height from DRM_SHADOW_PLANE_MAX_HEIGHT. + */ +#define DRM_SHADOW_PLANE_MAX_HEIGHT (4096u) + +/** + * struct drm_shadow_plane_state - plane state for planes with shadow buffers + * + * For planes that use a shadow buffer, struct drm_shadow_plane_state + * provides the regular plane state plus mappings of the shadow buffer + * into kernel address space. 
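+ *
+ * In a plane's &drm_plane_helper_funcs.atomic_update implementation the
+ * mapped shadow buffer is usually reached by upcasting the plane state
+ * (editor's sketch; my_plane_atomic_update() and my_blit() are
+ * placeholders)::
+ *
+ *	static void my_plane_atomic_update(struct drm_plane *plane,
+ *					   struct drm_atomic_state *state)
+ *	{
+ *		struct drm_plane_state *plane_state =
+ *			drm_atomic_get_new_plane_state(state, plane);
+ *		struct drm_shadow_plane_state *shadow_plane_state =
+ *			to_drm_shadow_plane_state(plane_state);
+ *
+ *		my_blit(plane->dev, &shadow_plane_state->data[0],
+ *			plane_state->fb);
+ *	}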
+ */ +struct drm_shadow_plane_state { + /** @base: plane state */ + struct drm_plane_state base; + + /* Transitional state - do not export or duplicate */ + + /** + * @map: Mappings of the plane's framebuffer BOs in to kernel address space + * + * The memory mappings stored in map should be established in the plane's + * prepare_fb callback and removed in the cleanup_fb callback. + */ + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + + /** + * @data: Address of each framebuffer BO's data + * + * The address of the data stored in each mapping. This is different + * for framebuffers with non-zero offset fields. + */ + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; +}; + +/** + * to_drm_shadow_plane_state - upcasts from struct drm_plane_state + * @state: the plane state + */ +static inline struct drm_shadow_plane_state * +to_drm_shadow_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct drm_shadow_plane_state, base); +} + +void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane, + struct drm_shadow_plane_state *new_shadow_plane_state); +void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state); +void __drm_gem_reset_shadow_plane(struct drm_plane *plane, + struct drm_shadow_plane_state *shadow_plane_state); + +void drm_gem_reset_shadow_plane(struct drm_plane *plane); +struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane); +void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane, + struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SHADOW_PLANE_FUNCS - + * Initializes struct drm_plane_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_plane_funcs to use the rsp helper functions. + */ +#define DRM_GEM_SHADOW_PLANE_FUNCS \ + .reset = drm_gem_reset_shadow_plane, \ + .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \ + .atomic_destroy_state = drm_gem_destroy_shadow_plane_state + +int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state); +void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS - + * Initializes struct drm_plane_helper_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_plane_helper_funcs to use the rsp helper + * functions. + */ +#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \ + .prepare_fb = drm_gem_prepare_shadow_fb, \ + .cleanup_fb = drm_gem_cleanup_shadow_fb + +int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe); +struct drm_plane_state * +drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe); +void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS - + * Initializes struct drm_simple_display_pipe_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_simple_display_pipe_funcs to use the rsp helper + * functions. 
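+ *
+ * Typical use is a single line in the pipe functions (editor's sketch; the
+ * my_pipe_* callbacks are placeholders)::
+ *
+ *	static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
+ *		.enable = my_pipe_enable,
+ *		.disable = my_pipe_disable,
+ *		.update = my_pipe_update,
+ *		DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+ *	};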
+ */ +#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \ + .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \ + .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \ + .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \ + .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \ + .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state + +#endif /* __DRM_GEM_ATOMIC_HELPER_H__ */ diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h new file mode 100644 index 000000000..8a043235d --- /dev/null +++ b/include/drm/drm_gem_dma_helper.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_GEM_DMA_HELPER_H__ +#define __DRM_GEM_DMA_HELPER_H__ + +#include <drm/drm_file.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_gem.h> + +struct drm_mode_create_dumb; + +/** + * struct drm_gem_dma_object - GEM object backed by DMA memory allocations + * @base: base GEM object + * @dma_addr: DMA address of the backing memory + * @sgt: scatter/gather table for imported PRIME buffers. The table can have + * more than one entry but they are guaranteed to have contiguous + * DMA addresses. + * @vaddr: kernel virtual address of the backing memory + * @map_noncoherent: if true, the GEM object is backed by non-coherent memory + */ +struct drm_gem_dma_object { + struct drm_gem_object base; + dma_addr_t dma_addr; + struct sg_table *sgt; + + /* For objects with DMA memory allocated by GEM DMA */ + void *vaddr; + + bool map_noncoherent; +}; + +#define to_drm_gem_dma_obj(gem_obj) \ + container_of(gem_obj, struct drm_gem_dma_object, base) + +struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm, + size_t size); +void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj); +void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj, + struct drm_printer *p, unsigned int indent); +struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj); +int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj, + struct iosys_map *map); +int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma); + +extern const struct vm_operations_struct drm_gem_dma_vm_ops; + +/* + * GEM object functions + */ + +/** + * drm_gem_dma_object_free - GEM object function for drm_gem_dma_free() + * @obj: GEM object to free + * + * This function wraps drm_gem_dma_free_object(). Drivers that employ the DMA helpers + * should use it as their &drm_gem_object_funcs.free handler. + */ +static inline void drm_gem_dma_object_free(struct drm_gem_object *obj) +{ + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); + + drm_gem_dma_free(dma_obj); +} + +/** + * drm_gem_dma_object_print_info() - Print &drm_gem_dma_object info for debugfs + * @p: DRM printer + * @indent: Tab indentation level + * @obj: GEM object + * + * This function wraps drm_gem_dma_print_info(). Drivers that employ the DMA helpers + * should use this function as their &drm_gem_object_funcs.print_info handler. + */ +static inline void drm_gem_dma_object_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj) +{ + const struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); + + drm_gem_dma_print_info(dma_obj, p, indent); +} + +/** + * drm_gem_dma_object_get_sg_table - GEM object function for drm_gem_dma_get_sg_table() + * @obj: GEM object + * + * This function wraps drm_gem_dma_get_sg_table(). Drivers that employ the DMA helpers should + * use it as their &drm_gem_object_funcs.get_sg_table handler. 
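+ *
+ * Together with the other drm_gem_dma_object_*() wrappers it typically ends
+ * up in a GEM object functions table like the following (editor's sketch of
+ * the usual wiring, not copied from a particular driver)::
+ *
+ *	static const struct drm_gem_object_funcs my_dma_gem_funcs = {
+ *		.free = drm_gem_dma_object_free,
+ *		.print_info = drm_gem_dma_object_print_info,
+ *		.get_sg_table = drm_gem_dma_object_get_sg_table,
+ *		.vmap = drm_gem_dma_object_vmap,
+ *		.mmap = drm_gem_dma_object_mmap,
+ *		.vm_ops = &drm_gem_dma_vm_ops,
+ *	};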
+ * + * Returns: + * A pointer to the scatter/gather table of pinned pages or NULL on failure. + */ +static inline struct sg_table *drm_gem_dma_object_get_sg_table(struct drm_gem_object *obj) +{ + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); + + return drm_gem_dma_get_sg_table(dma_obj); +} + +/* + * drm_gem_dma_object_vmap - GEM object function for drm_gem_dma_vmap() + * @obj: GEM object + * @map: Returns the kernel virtual address of the DMA GEM object's backing store. + * + * This function wraps drm_gem_dma_vmap(). Drivers that employ the DMA helpers should + * use it as their &drm_gem_object_funcs.vmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_gem_dma_object_vmap(struct drm_gem_object *obj, + struct iosys_map *map) +{ + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); + + return drm_gem_dma_vmap(dma_obj, map); +} + +/** + * drm_gem_dma_object_mmap - GEM object function for drm_gem_dma_mmap() + * @obj: GEM object + * @vma: VMA for the area to be mapped + * + * This function wraps drm_gem_dma_mmap(). Drivers that employ the dma helpers should + * use it as their &drm_gem_object_funcs.mmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); + + return drm_gem_dma_mmap(dma_obj, vma); +} + +/* + * Driver ops + */ + +/* create memory region for DRM framebuffer */ +int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args); + +/* create memory region for DRM framebuffer */ +int drm_gem_dma_dumb_create(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args); + +struct drm_gem_object * +drm_gem_dma_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +/** + * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE - DMA GEM driver operations + * @dumb_create_func: callback function for .dumb_create + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure. + * + * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS for drivers that + * override the default implementation of &struct rm_driver.dumb_create. Use + * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address + * on imported buffers should use + * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. + */ +#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \ + .dumb_create = (dumb_create_func), \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \ + .gem_prime_mmap = drm_gem_prime_mmap + +/** + * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure. + * + * Drivers that come with their own implementation of + * &struct drm_driver.dumb_create should use + * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use + * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address + * on imported buffers should use DRM_GEM_DMA_DRIVER_OPS_VMAP instead. 
+ */ +#define DRM_GEM_DMA_DRIVER_OPS \ + DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_dma_dumb_create) + +/** + * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - DMA GEM driver operations + * ensuring a virtual address + * on the buffer + * @dumb_create_func: callback function for .dumb_create + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure for drivers that need the virtual address also on + * imported buffers. + * + * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS_VMAP for drivers that + * override the default implementation of &struct drm_driver.dumb_create. Use + * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a + * virtual address on imported buffers should use + * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. + */ +#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ + .dumb_create = dumb_create_func, \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \ + .gem_prime_mmap = drm_gem_prime_mmap + +/** + * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual + * address on the buffer + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure for drivers that need the virtual address also on + * imported buffers. + * + * Drivers that come with their own implementation of + * &struct drm_driver.dumb_create should use + * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use + * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a + * virtual address on imported buffers should use DRM_GEM_DMA_DRIVER_OPS + * instead. + */ +#define DRM_GEM_DMA_DRIVER_OPS_VMAP \ + DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_dma_dumb_create) + +struct drm_gem_object * +drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *drm, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +/* + * File ops + */ + +#ifndef CONFIG_MMU +unsigned long drm_gem_dma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS \ + .get_unmapped_area = drm_gem_dma_get_unmapped_area, +#else +#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS +#endif + +/** + * DEFINE_DRM_GEM_DMA_FOPS() - macro to generate file operations for DMA drivers + * @name: name for the generated structure + * + * This macro autogenerates a suitable &struct file_operations for DMA based + * drivers, which can be assigned to &drm_driver.fops. Note that this structure + * cannot be shared between drivers, because it contains a reference to the + * current module using THIS_MODULE. + * + * Note that the declaration is already marked as static - if you need a + * non-static version of this you're probably doing it wrong and will break the + * THIS_MODULE reference by accident. 
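+ *
+ * Example (editor's sketch; the name my_fops is arbitrary)::
+ *
+ *	DEFINE_DRM_GEM_DMA_FOPS(my_fops);
+ *
+ * The generated my_fops can then be plugged into &drm_driver.fops.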
+ */ +#define DEFINE_DRM_GEM_DMA_FOPS(name) \ + static const struct file_operations name = {\ + .owner = THIS_MODULE,\ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_mmap,\ + DRM_GEM_DMA_UNMAPPED_AREA_FOPS \ + } + +#endif /* __DRM_GEM_DMA_HELPER_H__ */ diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h new file mode 100644 index 000000000..d302521f3 --- /dev/null +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -0,0 +1,53 @@ +#ifndef __DRM_GEM_FB_HELPER_H__ +#define __DRM_GEM_FB_HELPER_H__ + +#include <linux/dma-buf.h> +#include <linux/iosys-map.h> + +struct drm_afbc_framebuffer; +struct drm_device; +struct drm_fb_helper_surface_size; +struct drm_file; +struct drm_framebuffer; +struct drm_framebuffer_funcs; +struct drm_gem_object; +struct drm_mode_fb_cmd2; + +#define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52) + +struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb, + unsigned int plane); +void drm_gem_fb_destroy(struct drm_framebuffer *fb); +int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file, + unsigned int *handle); + +int drm_gem_fb_init_with_funcs(struct drm_device *dev, + struct drm_framebuffer *fb, + struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer * +drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer * +drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); +struct drm_framebuffer * +drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); + +int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map, + struct iosys_map *data); +void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map); +int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); +void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); + +#define drm_is_afbc(modifier) \ + (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0)) + +int drm_gem_fb_afbc_init(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_afbc_framebuffer *afbc_fb); + +#endif diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h new file mode 100644 index 000000000..a2201b248 --- /dev/null +++ b/include/drm/drm_gem_shmem_helper.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __DRM_GEM_SHMEM_HELPER_H__ +#define __DRM_GEM_SHMEM_HELPER_H__ + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/mutex.h> + +#include <drm/drm_file.h> +#include <drm/drm_gem.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_prime.h> + +struct dma_buf_attachment; +struct drm_mode_create_dumb; +struct drm_printer; +struct sg_table; + +/** + * struct drm_gem_shmem_object - GEM object backed by shmem + */ +struct drm_gem_shmem_object { + /** + * @base: Base GEM object + */ + struct drm_gem_object base; + + /** + * @pages_lock: Protects the page table and use count + */ + struct mutex pages_lock; + + /** + * @pages: Page table + */ + struct page **pages; + + /** + * @pages_use_count: + * + * Reference 
count on the pages table. + * The pages are put when the count reaches zero. + */ + unsigned int pages_use_count; + + /** + * @madv: State for madvise + * + * 0 is active/inuse. + * A negative value is the object is purged. + * Positive values are driver specific and not used by the helpers. + */ + int madv; + + /** + * @madv_list: List entry for madvise tracking + * + * Typically used by drivers to track purgeable objects + */ + struct list_head madv_list; + + /** + * @pages_mark_dirty_on_put: + * + * Mark pages as dirty when they are put. + */ + unsigned int pages_mark_dirty_on_put : 1; + + /** + * @pages_mark_accessed_on_put: + * + * Mark pages as accessed when they are put. + */ + unsigned int pages_mark_accessed_on_put : 1; + + /** + * @sgt: Scatter/gather table for imported PRIME buffers + */ + struct sg_table *sgt; + + /** + * @vmap_lock: Protects the vmap address and use count + */ + struct mutex vmap_lock; + + /** + * @vaddr: Kernel virtual address of the backing memory + */ + void *vaddr; + + /** + * @vmap_use_count: + * + * Reference count on the virtual address. + * The address are un-mapped when the count reaches zero. + */ + unsigned int vmap_use_count; + + /** + * @map_wc: map object write-combined (instead of using shmem defaults). + */ + bool map_wc; +}; + +#define to_drm_gem_shmem_obj(obj) \ + container_of(obj, struct drm_gem_shmem_object, base) + +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); + +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); +int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); +int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); + +int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); + +static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) +{ + return (shmem->madv > 0) && + !shmem->vmap_use_count && shmem->sgt && + !shmem->base.dma_buf && !shmem->base.import_attach; +} + +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); +bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); + +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); +struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); + +void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, + struct drm_printer *p, unsigned int indent); + +extern const struct vm_operations_struct drm_gem_shmem_vm_ops; + +/* + * GEM object functions + */ + +/** + * drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free() + * @obj: GEM object to free + * + * This function wraps drm_gem_shmem_free(). Drivers that employ the shmem helpers + * should use it as their &drm_gem_object_funcs.free handler. + */ +static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_free(shmem); +} + +/** + * drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs + * @p: DRM printer + * @indent: Tab indentation level + * @obj: GEM object + * + * This function wraps drm_gem_shmem_print_info(). 
Drivers that employ the shmem helpers should + * use this function as their &drm_gem_object_funcs.print_info handler. + */ +static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj) +{ + const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_print_info(shmem, p, indent); +} + +/** + * drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin() + * @obj: GEM object + * + * This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.pin handler. + */ +static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_pin(shmem); +} + +/** + * drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin() + * @obj: GEM object + * + * This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.unpin handler. + */ +static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_unpin(shmem); +} + +/** + * drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table() + * @obj: GEM object + * + * This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.get_sg_table handler. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or error pointer on failure. + */ +static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_get_sg_table(shmem); +} + +/* + * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap() + * @obj: GEM object + * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store. + * + * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.vmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, + struct iosys_map *map) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_vmap(shmem, map); +} + +/* + * drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap() + * @obj: GEM object + * @map: Kernel virtual address where the SHMEM GEM object was mapped + * + * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.vunmap handler. + */ +static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_vunmap(shmem, map); +} + +/** + * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap() + * @obj: GEM object + * @vma: VMA for the area to be mapped + * + * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.mmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. 
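+ *
+ * As with the other drm_gem_shmem_object_*() wrappers, drivers usually
+ * collect these helpers in one functions table (editor's sketch of the
+ * usual wiring, not taken from a particular driver)::
+ *
+ *	static const struct drm_gem_object_funcs my_shmem_gem_funcs = {
+ *		.free = drm_gem_shmem_object_free,
+ *		.print_info = drm_gem_shmem_object_print_info,
+ *		.pin = drm_gem_shmem_object_pin,
+ *		.unpin = drm_gem_shmem_object_unpin,
+ *		.get_sg_table = drm_gem_shmem_object_get_sg_table,
+ *		.vmap = drm_gem_shmem_object_vmap,
+ *		.vunmap = drm_gem_shmem_object_vunmap,
+ *		.mmap = drm_gem_shmem_object_mmap,
+ *		.vm_ops = &drm_gem_shmem_vm_ops,
+ *	};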
+ */ +static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_mmap(shmem, vma); +} + +/* + * Driver ops + */ + +struct drm_gem_object * +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); + +/** + * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations + * + * This macro provides a shortcut for setting the shmem GEM operations in + * the &drm_driver structure. + */ +#define DRM_GEM_SHMEM_DRIVER_OPS \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \ + .gem_prime_mmap = drm_gem_prime_mmap, \ + .dumb_create = drm_gem_shmem_dumb_create + +#endif /* __DRM_GEM_SHMEM_HELPER_H__ */ diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h new file mode 100644 index 000000000..4c003b4f1 --- /dev/null +++ b/include/drm/drm_gem_ttm_helper.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef DRM_GEM_TTM_HELPER_H +#define DRM_GEM_TTM_HELPER_H + +#include <linux/container_of.h> + +#include <drm/drm_device.h> +#include <drm/drm_gem.h> +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_bo_driver.h> + +struct iosys_map; + +#define drm_gem_ttm_of_gem(gem_obj) \ + container_of(gem_obj, struct ttm_buffer_object, base) + +void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *gem); +int drm_gem_ttm_vmap(struct drm_gem_object *gem, + struct iosys_map *map); +void drm_gem_ttm_vunmap(struct drm_gem_object *gem, + struct iosys_map *map); +int drm_gem_ttm_mmap(struct drm_gem_object *gem, + struct vm_area_struct *vma); + +int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset); + +#endif diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h new file mode 100644 index 000000000..c083a1d71 --- /dev/null +++ b/include/drm/drm_gem_vram_helper.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef DRM_GEM_VRAM_HELPER_H +#define DRM_GEM_VRAM_HELPER_H + +#include <drm/drm_file.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_ttm_helper.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_modes.h> +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_bo_driver.h> + +#include <linux/container_of.h> +#include <linux/iosys-map.h> + +struct drm_mode_create_dumb; +struct drm_plane; +struct drm_plane_state; +struct drm_simple_display_pipe; +struct filp; +struct vm_area_struct; + +#define DRM_GEM_VRAM_PL_FLAG_SYSTEM (1 << 0) +#define DRM_GEM_VRAM_PL_FLAG_VRAM (1 << 1) +#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN (1 << 2) + +/* + * Buffer-object helpers + */ + +/** + * struct drm_gem_vram_object - GEM object backed by VRAM + * @bo: TTM buffer object + * @map: Mapping information for @bo + * @placement: TTM placement information. Supported placements are \ + %TTM_PL_VRAM and %TTM_PL_SYSTEM + * @placements: TTM placement information. + * + * The type struct drm_gem_vram_object represents a GEM object that is + * backed by VRAM. It can be used for simple framebuffer devices with + * dedicated memory. 
The buffer object can be evicted to system memory if + * video memory becomes scarce. + * + * GEM VRAM objects perform reference counting for pin and mapping + * operations. So a buffer object that has been pinned N times with + * drm_gem_vram_pin() must be unpinned N times with + * drm_gem_vram_unpin(). The same applies to pairs of + * drm_gem_vram_kmap() and drm_gem_vram_kunmap(), as well as pairs of + * drm_gem_vram_vmap() and drm_gem_vram_vunmap(). + */ +struct drm_gem_vram_object { + struct ttm_buffer_object bo; + struct iosys_map map; + + /** + * @vmap_use_count: + * + * Reference count on the virtual address. + * The address are un-mapped when the count reaches zero. + */ + unsigned int vmap_use_count; + + /* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */ + struct ttm_placement placement; + struct ttm_place placements[2]; +}; + +/** + * drm_gem_vram_of_bo - Returns the container of type + * &struct drm_gem_vram_object for field bo. + * @bo: the VRAM buffer object + * Returns: The containing GEM VRAM object + */ +static inline struct drm_gem_vram_object *drm_gem_vram_of_bo( + struct ttm_buffer_object *bo) +{ + return container_of(bo, struct drm_gem_vram_object, bo); +} + +/** + * drm_gem_vram_of_gem - Returns the container of type + * &struct drm_gem_vram_object for field gem. + * @gem: the GEM object + * Returns: The containing GEM VRAM object + */ +static inline struct drm_gem_vram_object *drm_gem_vram_of_gem( + struct drm_gem_object *gem) +{ + return container_of(gem, struct drm_gem_vram_object, bo.base); +} + +struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, + size_t size, + unsigned long pg_align); +void drm_gem_vram_put(struct drm_gem_vram_object *gbo); +s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); +int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); +int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map); +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, + struct iosys_map *map); + +int drm_gem_vram_fill_create_dumb(struct drm_file *file, + struct drm_device *dev, + unsigned long pg_align, + unsigned long pitch_align, + struct drm_mode_create_dumb *args); + +/* + * Helpers for struct drm_driver + */ + +int drm_gem_vram_driver_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +/* + * Helpers for struct drm_plane_helper_funcs + */ +int +drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state); +void +drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state); + +/** + * DRM_GEM_VRAM_PLANE_HELPER_FUNCS - + * Initializes struct drm_plane_helper_funcs for VRAM handling + * + * Drivers may use GEM BOs as VRAM helpers for the framebuffer memory. This + * macro initializes struct drm_plane_helper_funcs to use the respective helper + * functions. 
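+ *
+ * Typical use (editor's sketch; the my_plane_* callbacks are placeholders)::
+ *
+ *	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
+ *		DRM_GEM_VRAM_PLANE_HELPER_FUNCS,
+ *		.atomic_check = my_plane_atomic_check,
+ *		.atomic_update = my_plane_atomic_update,
+ *	};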
+ */ +#define DRM_GEM_VRAM_PLANE_HELPER_FUNCS \ + .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \ + .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb + +/* + * Helpers for struct drm_simple_display_pipe_funcs + */ + +int drm_gem_vram_simple_display_pipe_prepare_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *new_state); + +void drm_gem_vram_simple_display_pipe_cleanup_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state); + +/** + * define DRM_GEM_VRAM_DRIVER - default callback functions for \ + &struct drm_driver + * + * Drivers that use VRAM MM and GEM VRAM can use this macro to initialize + * &struct drm_driver with default functions. + */ +#define DRM_GEM_VRAM_DRIVER \ + .debugfs_init = drm_vram_mm_debugfs_init, \ + .dumb_create = drm_gem_vram_driver_dumb_create, \ + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \ + .gem_prime_mmap = drm_gem_prime_mmap + +/* + * VRAM memory manager + */ + +/** + * struct drm_vram_mm - An instance of VRAM MM + * @vram_base: Base address of the managed video memory + * @vram_size: Size of the managed video memory in bytes + * @bdev: The TTM BO device. + * @funcs: TTM BO functions + * + * The fields &struct drm_vram_mm.vram_base and + * &struct drm_vram_mm.vrm_size are managed by VRAM MM, but are + * available for public read access. Use the field + * &struct drm_vram_mm.bdev to access the TTM BO device. + */ +struct drm_vram_mm { + uint64_t vram_base; + size_t vram_size; + + struct ttm_device bdev; +}; + +/** + * drm_vram_mm_of_bdev() - \ + Returns the container of type &struct ttm_device for field bdev. + * @bdev: the TTM BO device + * + * Returns: + * The containing instance of &struct drm_vram_mm + */ +static inline struct drm_vram_mm *drm_vram_mm_of_bdev( + struct ttm_device *bdev) +{ + return container_of(bdev, struct drm_vram_mm, bdev); +} + +void drm_vram_mm_debugfs_init(struct drm_minor *minor); + +/* + * Helpers for integration with struct drm_device + */ + +int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base, + size_t vram_size); + +/* + * Mode-config helpers + */ + +enum drm_mode_status +drm_vram_helper_mode_valid(struct drm_device *dev, + const struct drm_display_mode *mode); + +#endif diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h new file mode 100644 index 000000000..6ed61c371 --- /dev/null +++ b/include/drm/drm_ioctl.h @@ -0,0 +1,185 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_IOCTL_H_ +#define _DRM_IOCTL_H_ + +#include <linux/types.h> +#include <linux/bitops.h> + +#include <asm/ioctl.h> + +struct drm_device; +struct drm_file; +struct file; + +/** + * drm_ioctl_t - DRM ioctl function type. + * @dev: DRM device inode + * @data: private pointer of the ioctl call + * @file_priv: DRM file this ioctl was made on + * + * This is the DRM ioctl typedef. Note that drm_ioctl() has alrady copied @data + * into kernel-space, and will also copy it back, depending upon the read/write + * settings in the ioctl command code. + */ +typedef int drm_ioctl_t(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/** + * drm_ioctl_compat_t - compatibility DRM ioctl function type. + * @filp: file pointer + * @cmd: ioctl command code + * @arg: DRM file this ioctl was made on + * + * Just a typedef to make declaring an array of compatibility handlers easier. + * New drivers shouldn't screw up the structure layout for their ioctl + * structures and hence never need this. + */ +typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, + unsigned long arg); + +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n) +#define DRM_MAJOR 226 + +/** + * enum drm_ioctl_flags - DRM ioctl flags + * + * Various flags that can be set in &drm_ioctl_desc.flags to control how + * userspace can use a given ioctl. + */ +enum drm_ioctl_flags { + /** + * @DRM_AUTH: + * + * This is for ioctl which are used for rendering, and require that the + * file descriptor is either for a render node, or if it's a + * legacy/primary node, then it must be authenticated. + */ + DRM_AUTH = BIT(0), + /** + * @DRM_MASTER: + * + * This must be set for any ioctl which can change the modeset or + * display state. Userspace must call the ioctl through a primary node, + * while it is the active master. + * + * Note that read-only modeset ioctl can also be called by + * unauthenticated clients, or when a master is not the currently active + * one. + */ + DRM_MASTER = BIT(1), + /** + * @DRM_ROOT_ONLY: + * + * Anything that could potentially wreak a master file descriptor needs + * to have this flag set. Current that's only for the SETMASTER and + * DROPMASTER ioctl, which e.g. logind can call to force a non-behaving + * master (display compositor) into compliance. + * + * This is equivalent to callers with the SYSADMIN capability. + */ + DRM_ROOT_ONLY = BIT(2), + /** + * @DRM_UNLOCKED: + * + * Whether &drm_ioctl_desc.func should be called with the DRM BKL held + * or not. Enforced as the default for all modern drivers, hence there + * should never be a need to set this flag. + * + * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the + * only legacy IOCTL which needs this. + */ + DRM_UNLOCKED = BIT(4), + /** + * @DRM_RENDER_ALLOW: + * + * This is used for all ioctl needed for rendering only, for drivers + * which support render nodes. This should be all new render drivers, + * and hence it should be always set for any ioctl with DRM_AUTH set. 
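+ *
+ * For illustration, a rendering ioctl entry in a driver's ioctl table
+ * would then carry both flags (editor's sketch; MY_SUBMIT and
+ * my_submit_ioctl() are placeholders, and DRM_IOCTL_MY_SUBMIT would have
+ * to be defined in the driver's uapi header)::
+ *
+ *	static const struct drm_ioctl_desc my_ioctls[] = {
+ *		DRM_IOCTL_DEF_DRV(MY_SUBMIT, my_submit_ioctl,
+ *				  DRM_AUTH | DRM_RENDER_ALLOW),
+ *	};
+ *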
+ * Note though that read-only query ioctl might have this set, but have + * not set DRM_AUTH because they do not require authentication. + */ + DRM_RENDER_ALLOW = BIT(5), +}; + +/** + * struct drm_ioctl_desc - DRM driver ioctl entry + * @cmd: ioctl command number, without flags + * @flags: a bitmask of &enum drm_ioctl_flags + * @func: handler for this ioctl + * @name: user-readable name for debug output + * + * For convenience it's easier to create these using the DRM_IOCTL_DEF_DRV() + * macro. + */ +struct drm_ioctl_desc { + unsigned int cmd; + enum drm_ioctl_flags flags; + drm_ioctl_t *func; + const char *name; +}; + +/** + * DRM_IOCTL_DEF_DRV() - helper macro to fill out a &struct drm_ioctl_desc + * @ioctl: ioctl command suffix + * @_func: handler for the ioctl + * @_flags: a bitmask of &enum drm_ioctl_flags + * + * Small helper macro to create a &struct drm_ioctl_desc entry. The ioctl + * command number is constructed by prepending ``DRM_IOCTL\_`` and passing that + * to DRM_IOCTL_NR(). + */ +#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \ + .cmd = DRM_IOCTL_##ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32); +#ifdef CONFIG_COMPAT +long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#else +/* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */ +#define drm_compat_ioctl NULL +#endif +bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); + +int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_invalid_op(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#endif /* _DRM_IOCTL_H_ */ diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h new file mode 100644 index 000000000..5c9ef6a2a --- /dev/null +++ b/include/drm/drm_lease.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright © 2017 Keith Packard <keithp@keithp.com> + */ + +#ifndef _DRM_LEASE_H_ +#define _DRM_LEASE_H_ + +struct drm_file; +struct drm_device; +struct drm_master; + +struct drm_master *drm_lease_owner(struct drm_master *master); + +void drm_lease_destroy(struct drm_master *lessee); + +bool drm_lease_held(struct drm_file *file_priv, int id); + +bool _drm_lease_held(struct drm_file *file_priv, int id); + +void drm_lease_revoke(struct drm_master *master); + +uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs); + +int drm_mode_create_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_list_lessees_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_get_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_revoke_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +#endif /* _DRM_LEASE_H_ */ diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h new file mode 100644 index 000000000..0fc85418a --- /dev/null +++ b/include/drm/drm_legacy.h @@ -0,0 +1,331 @@ +#ifndef __DRM_DRM_LEGACY_H__ +#define __DRM_DRM_LEGACY_H__ +/* + * Legacy driver interfaces for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. 
+ * Copyright © 2014 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/agp_backend.h> + +#include <drm/drm.h> +#include <drm/drm_auth.h> + +struct drm_device; +struct drm_driver; +struct file; +struct pci_driver; + +/* + * Legacy Support for palateontologic DRM drivers + * + * If you add a new driver and it uses any of these functions or structures, + * you're doing it terribly wrong. + */ + +/* + * Hash-table Support + */ + +struct drm_hash_item { + struct hlist_node head; + unsigned long key; +}; + +struct drm_open_hash { + struct hlist_head *table; + u8 order; +}; + +/** + * DMA buffer. + */ +struct drm_buf { + int idx; /**< Index into master buflist */ + int total; /**< Buffer size */ + int order; /**< log-base-2(total) */ + int used; /**< Amount of buffer in use (for DMA) */ + unsigned long offset; /**< Byte offset (used internally) */ + void *address; /**< Address of buffer */ + unsigned long bus_address; /**< Bus address of buffer */ + struct drm_buf *next; /**< Kernel-only: used for free list */ + __volatile__ int waiting; /**< On kernel DMA queue */ + __volatile__ int pending; /**< On hardware DMA queue */ + struct drm_file *file_priv; /**< Private of holding file descr */ + int context; /**< Kernel queue for this buffer */ + int while_locked; /**< Dispatch this buffer while locked */ + enum { + DRM_LIST_NONE = 0, + DRM_LIST_FREE = 1, + DRM_LIST_WAIT = 2, + DRM_LIST_PEND = 3, + DRM_LIST_PRIO = 4, + DRM_LIST_RECLAIM = 5 + } list; /**< Which list we're on */ + + int dev_priv_size; /**< Size of buffer private storage */ + void *dev_private; /**< Per-buffer private storage */ +}; + +typedef struct drm_dma_handle { + dma_addr_t busaddr; + void *vaddr; + size_t size; +} drm_dma_handle_t; + +/** + * Buffer entry. There is one of this for each buffer size order. + */ +struct drm_buf_entry { + int buf_size; /**< size */ + int buf_count; /**< number of buffers */ + struct drm_buf *buflist; /**< buffer list */ + int seg_count; + int page_order; + struct drm_dma_handle **seglist; + + int low_mark; /**< Low water mark */ + int high_mark; /**< High water mark */ +}; + +/** + * DMA data. 
+ */ +struct drm_device_dma { + + struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ + int buf_count; /**< total number of buffers */ + struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ + int seg_count; + int page_count; /**< number of pages */ + unsigned long *pagelist; /**< page list */ + unsigned long byte_count; + enum { + _DRM_DMA_USE_AGP = 0x01, + _DRM_DMA_USE_SG = 0x02, + _DRM_DMA_USE_FB = 0x04, + _DRM_DMA_USE_PCI_RO = 0x08 + } flags; + +}; + +/** + * Scatter-gather memory. + */ +struct drm_sg_mem { + unsigned long handle; + void *virtual; + int pages; + struct page **pagelist; + dma_addr_t *busaddr; +}; + +/** + * Kernel side of a mapping + */ +struct drm_local_map { + dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/ + unsigned long size; /**< Requested physical size (bytes) */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ + void *handle; /**< User-space: "Handle" to pass to mmap() */ + /**< Kernel-space: kernel-virtual address */ + int mtrr; /**< MTRR slot used */ +}; + +typedef struct drm_local_map drm_local_map_t; + +/** + * Mappings list + */ +struct drm_map_list { + struct list_head head; /**< list head */ + struct drm_hash_item hash; + struct drm_local_map *map; /**< mapping */ + uint64_t user_token; + struct drm_master *master; +}; + +int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, struct drm_local_map **map_p); +struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token); +void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); +int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); +struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); +int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); + +int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); +int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); + +/** + * Test that the hardware lock is held by the caller, returning otherwise. + * + * \param dev DRM device. + * \param filp file pointer of the caller. 
+ */ +#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ +do { \ + if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ + _file_priv->master->lock.file_priv != _file_priv) { \ + DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ + __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ + _file_priv->master->lock.file_priv, _file_priv); \ + return -EINVAL; \ + } \ +} while (0) + +void drm_legacy_idlelock_take(struct drm_lock_data *lock); +void drm_legacy_idlelock_release(struct drm_lock_data *lock); + +/* drm_irq.c */ +int drm_legacy_irq_uninstall(struct drm_device *dev); + +/* drm_pci.c */ + +#ifdef CONFIG_PCI + +int drm_legacy_pci_init(const struct drm_driver *driver, + struct pci_driver *pdriver); +void drm_legacy_pci_exit(const struct drm_driver *driver, + struct pci_driver *pdriver); + +#else + +static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, + size_t size, size_t align) +{ + return NULL; +} + +static inline void drm_pci_free(struct drm_device *dev, + struct drm_dma_handle *dmah) +{ +} + +static inline int drm_legacy_pci_init(const struct drm_driver *driver, + struct pci_driver *pdriver) +{ + return -EINVAL; +} + +static inline void drm_legacy_pci_exit(const struct drm_driver *driver, + struct pci_driver *pdriver) +{ +} + +#endif + +/* + * AGP Support + */ + +struct drm_agp_head { + struct agp_kern_info agp_info; + struct list_head memory; + unsigned long mode; + struct agp_bridge_data *bridge; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; + int cant_use_aperture; + unsigned long page_mask; +}; + +#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) +struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev); +int drm_legacy_agp_acquire(struct drm_device *dev); +int drm_legacy_agp_release(struct drm_device *dev); +int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); +int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info); +int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); +#else +static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) +{ + return NULL; +} + +static inline int drm_legacy_agp_acquire(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_release(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_enable(struct drm_device *dev, + struct drm_agp_mode mode) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_info(struct drm_device *dev, + struct drm_agp_info *info) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_alloc(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_free(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_unbind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +static inline int drm_legacy_agp_bind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} +#endif + +/* drm_memory.c */ +void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); +void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); 
+void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); + +#endif /* __DRM_DRM_LEGACY_H__ */ diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h new file mode 100644 index 000000000..ad08f834a --- /dev/null +++ b/include/drm/drm_managed.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef _DRM_MANAGED_H_ +#define _DRM_MANAGED_H_ + +#include <linux/gfp.h> +#include <linux/overflow.h> +#include <linux/types.h> + +struct drm_device; +struct mutex; + +typedef void (*drmres_release_t)(struct drm_device *dev, void *res); + +/** + * drmm_add_action - add a managed release action to a &drm_device + * @dev: DRM device + * @action: function which should be called when @dev is released + * @data: opaque pointer, passed to @action + * + * This function adds the @release action with optional parameter @data to the + * list of cleanup actions for @dev. The cleanup actions will be run in reverse + * order in the final drm_dev_put() call for @dev. + */ +#define drmm_add_action(dev, action, data) \ + __drmm_add_action(dev, action, data, #action) + +int __must_check __drmm_add_action(struct drm_device *dev, + drmres_release_t action, + void *data, const char *name); + +/** + * drmm_add_action_or_reset - add a managed release action to a &drm_device + * @dev: DRM device + * @action: function which should be called when @dev is released + * @data: opaque pointer, passed to @action + * + * Similar to drmm_add_action(), with the only difference that upon failure + * @action is directly called for any cleanup work necessary on failures. + */ +#define drmm_add_action_or_reset(dev, action, data) \ + __drmm_add_action_or_reset(dev, action, data, #action) + +int __must_check __drmm_add_action_or_reset(struct drm_device *dev, + drmres_release_t action, + void *data, const char *name); + +void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc; + +/** + * drmm_kzalloc - &drm_device managed kzalloc() + * @dev: DRM device + * @size: size of the memory allocation + * @gfp: GFP allocation flags + * + * This is a &drm_device managed version of kzalloc(). The allocated memory is + * automatically freed on the final drm_dev_put(). Memory can also be freed + * before the final drm_dev_put() by calling drmm_kfree(). + */ +static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp) +{ + return drmm_kmalloc(dev, size, gfp | __GFP_ZERO); +} + +/** + * drmm_kmalloc_array - &drm_device managed kmalloc_array() + * @dev: DRM device + * @n: number of array elements to allocate + * @size: size of array member + * @flags: GFP allocation flags + * + * This is a &drm_device managed version of kmalloc_array(). The allocated + * memory is automatically freed on the final drm_dev_put() and works exactly + * like a memory allocation obtained by drmm_kmalloc(). + */ +static inline void *drmm_kmalloc_array(struct drm_device *dev, + size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + + return drmm_kmalloc(dev, bytes, flags); +} + +/** + * drmm_kcalloc - &drm_device managed kcalloc() + * @dev: DRM device + * @n: number of array elements to allocate + * @size: size of array member + * @flags: GFP allocation flags + * + * This is a &drm_device managed version of kcalloc(). The allocated memory is + * automatically freed on the final drm_dev_put() and works exactly like a + * memory allocation obtained by drmm_kmalloc(). 
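+ *
+ * A minimal usage sketch, assuming a hypothetical driver with a &drm_device
+ * pointer named drm and an element type struct my_lut_entry (neither name is
+ * part of this header)::
+ *
+ *	struct my_lut_entry *lut;
+ *
+ *	lut = drmm_kcalloc(drm, 256, sizeof(*lut), GFP_KERNEL);
+ *	if (!lut)
+ *		return -ENOMEM;
+ *
+ * No explicit kfree() is needed; the allocation is released on the final
+ * drm_dev_put(), or earlier via drmm_kfree().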
+ */ +static inline void *drmm_kcalloc(struct drm_device *dev, + size_t n, size_t size, gfp_t flags) +{ + return drmm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); +} + +char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp); + +void drmm_kfree(struct drm_device *dev, void *data); + +void __drmm_mutex_release(struct drm_device *dev, void *res); + +/** + * drmm_mutex_init - &drm_device-managed mutex_init() + * @dev: DRM device + * @lock: lock to be initialized + * + * Returns: + * 0 on success, or a negative errno code otherwise. + * + * This is a &drm_device-managed version of mutex_init(). The initialized + * lock is automatically destroyed on the final drm_dev_put(). + */ +#define drmm_mutex_init(dev, lock) ({ \ + mutex_init(lock); \ + drmm_add_action_or_reset(dev, __drmm_mutex_release, lock); \ +}) \ + +#endif diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h new file mode 100644 index 000000000..14eaecb18 --- /dev/null +++ b/include/drm/drm_mipi_dbi.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * MIPI Display Bus Interface (DBI) LCD controller support + * + * Copyright 2016 Noralf Trønnes + */ + +#ifndef __LINUX_MIPI_DBI_H +#define __LINUX_MIPI_DBI_H + +#include <linux/mutex.h> +#include <drm/drm_device.h> +#include <drm/drm_simple_kms_helper.h> + +struct drm_rect; +struct spi_device; +struct gpio_desc; +struct regulator; + +/** + * struct mipi_dbi - MIPI DBI interface + */ +struct mipi_dbi { + /** + * @cmdlock: Command lock + */ + struct mutex cmdlock; + + /** + * @command: Bus specific callback executing commands. + */ + int (*command)(struct mipi_dbi *dbi, u8 *cmd, u8 *param, size_t num); + + /** + * @read_commands: Array of read commands terminated by a zero entry. + * Reading is disabled if this is NULL. + */ + const u8 *read_commands; + + /** + * @swap_bytes: Swap bytes in buffer before transfer + */ + bool swap_bytes; + + /** + * @reset: Optional reset gpio + */ + struct gpio_desc *reset; + + /* Type C specific */ + + /** + * @spi: SPI device + */ + struct spi_device *spi; + + /** + * @dc: Optional D/C gpio. + */ + struct gpio_desc *dc; + + /** + * @tx_buf9: Buffer used for Option 1 9-bit conversion + */ + void *tx_buf9; + + /** + * @tx_buf9_len: Size of tx_buf9. + */ + size_t tx_buf9_len; +}; + +/** + * struct mipi_dbi_dev - MIPI DBI device + */ +struct mipi_dbi_dev { + /** + * @drm: DRM device + */ + struct drm_device drm; + + /** + * @pipe: Display pipe structure + */ + struct drm_simple_display_pipe pipe; + + /** + * @connector: Connector + */ + struct drm_connector connector; + + /** + * @mode: Fixed display mode + */ + struct drm_display_mode mode; + + /** + * @tx_buf: Buffer used for transfer (copy clip rect area) + */ + u16 *tx_buf; + + /** + * @rotation: initial rotation in degrees Counter Clock Wise + */ + unsigned int rotation; + + /** + * @left_offset: Horizontal offset of the display relative to the + * controller's driver array + */ + unsigned int left_offset; + + /** + * @top_offset: Vertical offset of the display relative to the + * controller's driver array + */ + unsigned int top_offset; + + /** + * @backlight: backlight device (optional) + */ + struct backlight_device *backlight; + + /** + * @regulator: power regulator (optional) + */ + struct regulator *regulator; + + /** + * @dbi: MIPI DBI interface + */ + struct mipi_dbi dbi; + + /** + * @driver_private: Driver private data. 
+ * Necessary for drivers with private data since devm_drm_dev_alloc() + * can't allocate structures that embed a structure which then again + * embeds drm_device. + */ + void *driver_private; +}; + +static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm) +{ + return container_of(drm, struct mipi_dbi_dev, drm); +} + +int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi, + struct gpio_desc *dc); +int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const struct drm_display_mode *mode, + unsigned int rotation, size_t tx_buf_size); +int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev, + const struct drm_simple_display_pipe_funcs *funcs, + const struct drm_display_mode *mode, unsigned int rotation); +enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode); +void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state); +void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plan_state); +void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); +void mipi_dbi_hw_reset(struct mipi_dbi *dbi); +bool mipi_dbi_display_is_on(struct mipi_dbi *dbi); +int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev); +int mipi_dbi_poweron_conditional_reset(struct mipi_dbi_dev *dbidev); + +u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); +int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz, + u8 bpw, const void *buf, size_t len); + +int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val); +int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); +int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, + size_t len); +int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_rect *clip, bool swap); +/** + * mipi_dbi_command - MIPI DCS command with optional parameter(s) + * @dbi: MIPI DBI structure + * @cmd: Command + * @seq: Optional parameter(s) + * + * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for + * get/read. + * + * Returns: + * Zero on success, negative error code on failure. + */ +#define mipi_dbi_command(dbi, cmd, seq...) \ +({ \ + const u8 d[] = { seq }; \ + struct device *dev = &(dbi)->spi->dev; \ + int ret; \ + ret = mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \ + if (ret) \ + dev_err_ratelimited(dev, "error %d when sending command %#02x\n", ret, cmd); \ + ret; \ +}) + +#ifdef CONFIG_DEBUG_FS +void mipi_dbi_debugfs_init(struct drm_minor *minor); +#else +static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {} +#endif + +#endif /* __LINUX_MIPI_DBI_H */ diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h new file mode 100644 index 000000000..9054a5185 --- /dev/null +++ b/include/drm/drm_mipi_dsi.h @@ -0,0 +1,362 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * MIPI DSI Bus + * + * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd. 
+ * Andrzej Hajda <a.hajda@samsung.com> + */ + +#ifndef __DRM_MIPI_DSI_H__ +#define __DRM_MIPI_DSI_H__ + +#include <linux/device.h> + +struct mipi_dsi_host; +struct mipi_dsi_device; +struct drm_dsc_picture_parameter_set; + +/* request ACK from peripheral */ +#define MIPI_DSI_MSG_REQ_ACK BIT(0) +/* use Low Power Mode to transmit message */ +#define MIPI_DSI_MSG_USE_LPM BIT(1) + +/** + * struct mipi_dsi_msg - read/write DSI buffer + * @channel: virtual channel id + * @type: payload data type + * @flags: flags controlling this message transmission + * @tx_len: length of @tx_buf + * @tx_buf: data to be written + * @rx_len: length of @rx_buf + * @rx_buf: data to be read, or NULL + */ +struct mipi_dsi_msg { + u8 channel; + u8 type; + u16 flags; + + size_t tx_len; + const void *tx_buf; + + size_t rx_len; + void *rx_buf; +}; + +bool mipi_dsi_packet_format_is_short(u8 type); +bool mipi_dsi_packet_format_is_long(u8 type); + +/** + * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format + * @size: size (in bytes) of the packet + * @header: the four bytes that make up the header (Data ID, Word Count or + * Packet Data, and ECC) + * @payload_length: number of bytes in the payload + * @payload: a pointer to a buffer containing the payload, if any + */ +struct mipi_dsi_packet { + size_t size; + u8 header[4]; + size_t payload_length; + const u8 *payload; +}; + +int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, + const struct mipi_dsi_msg *msg); + +/** + * struct mipi_dsi_host_ops - DSI bus operations + * @attach: attach DSI device to DSI host + * @detach: detach DSI device from DSI host + * @transfer: transmit a DSI packet + * + * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg + * structures. This structure contains information about the type of packet + * being transmitted as well as the transmit and receive buffers. When an + * error is encountered during transmission, this function will return a + * negative error code. On success it shall return the number of bytes + * transmitted for write packets or the number of bytes received for read + * packets. + * + * Note that typically DSI packet transmission is atomic, so the .transfer() + * function will seldomly return anything other than the number of bytes + * contained in the transmit buffer on success. + * + * Also note that those callbacks can be called no matter the state the + * host is in. Drivers that need the underlying device to be powered to + * perform these operations will first need to make sure it's been + * properly enabled. 
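+ *
+ * A minimal sketch of a host driver's ops table, assuming hypothetical
+ * my_dsi_attach/my_dsi_detach/my_dsi_transfer callbacks (not part of this
+ * header)::
+ *
+ *	static const struct mipi_dsi_host_ops my_dsi_host_ops = {
+ *		.attach = my_dsi_attach,
+ *		.detach = my_dsi_detach,
+ *		.transfer = my_dsi_transfer,
+ *	};
+ *
+ * The ops table is hooked up to a &mipi_dsi_host which is then published
+ * with mipi_dsi_host_register().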
+ */ +struct mipi_dsi_host_ops { + int (*attach)(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi); + int (*detach)(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi); + ssize_t (*transfer)(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +}; + +/** + * struct mipi_dsi_host - DSI host device + * @dev: driver model device node for this DSI host + * @ops: DSI host operations + * @list: list management + */ +struct mipi_dsi_host { + struct device *dev; + const struct mipi_dsi_host_ops *ops; + struct list_head list; +}; + +int mipi_dsi_host_register(struct mipi_dsi_host *host); +void mipi_dsi_host_unregister(struct mipi_dsi_host *host); +struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); + +/* DSI mode flags */ + +/* video mode */ +#define MIPI_DSI_MODE_VIDEO BIT(0) +/* video burst mode */ +#define MIPI_DSI_MODE_VIDEO_BURST BIT(1) +/* video pulse mode */ +#define MIPI_DSI_MODE_VIDEO_SYNC_PULSE BIT(2) +/* enable auto vertical count mode */ +#define MIPI_DSI_MODE_VIDEO_AUTO_VERT BIT(3) +/* enable hsync-end packets in vsync-pulse and v-porch area */ +#define MIPI_DSI_MODE_VIDEO_HSE BIT(4) +/* disable hfront-porch area */ +#define MIPI_DSI_MODE_VIDEO_NO_HFP BIT(5) +/* disable hback-porch area */ +#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6) +/* disable hsync-active area */ +#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7) +/* flush display FIFO on vsync pulse */ +#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) +/* disable EoT packets in HS mode */ +#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9) +/* device supports non-continuous clock behavior (DSI spec 5.6.1) */ +#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) +/* transmit data in low power */ +#define MIPI_DSI_MODE_LPM BIT(11) +/* transmit data ending at the same time for all lanes within one hsync */ +#define MIPI_DSI_HS_PKT_END_ALIGNED BIT(12) + +enum mipi_dsi_pixel_format { + MIPI_DSI_FMT_RGB888, + MIPI_DSI_FMT_RGB666, + MIPI_DSI_FMT_RGB666_PACKED, + MIPI_DSI_FMT_RGB565, +}; + +#define DSI_DEV_NAME_SIZE 20 + +/** + * struct mipi_dsi_device_info - template for creating a mipi_dsi_device + * @type: DSI peripheral chip type + * @channel: DSI virtual channel assigned to peripheral + * @node: pointer to OF device node or NULL + * + * This is populated and passed to mipi_dsi_device_new to create a new + * DSI device + */ +struct mipi_dsi_device_info { + char type[DSI_DEV_NAME_SIZE]; + u32 channel; + struct device_node *node; +}; + +/** + * struct mipi_dsi_device - DSI peripheral device + * @host: DSI host for this peripheral + * @dev: driver model device node for this peripheral + * @name: DSI peripheral chip type + * @channel: virtual channel assigned to the peripheral + * @format: pixel format for video mode + * @lanes: number of active data lanes + * @mode_flags: DSI operation mode related flags + * @hs_rate: maximum lane frequency for high speed mode in hertz, this should + * be set to the real limits of the hardware, zero is only accepted for + * legacy drivers + * @lp_rate: maximum lane frequency for low power mode in hertz, this should + * be set to the real limits of the hardware, zero is only accepted for + * legacy drivers + * @dsc: panel/bridge DSC pps payload to be sent + */ +struct mipi_dsi_device { + struct mipi_dsi_host *host; + struct device dev; + + char name[DSI_DEV_NAME_SIZE]; + unsigned int channel; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + unsigned long mode_flags; + unsigned long hs_rate; + unsigned long lp_rate; + struct drm_dsc_config *dsc; +}; + +#define 
MIPI_DSI_MODULE_PREFIX "mipi-dsi:" + +static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) +{ + return container_of(dev, struct mipi_dsi_device, dev); +} + +/** + * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any + * given pixel format defined by the MIPI DSI + * specification + * @fmt: MIPI DSI pixel format + * + * Returns: The number of bits per pixel of the given pixel format. + */ +static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB888: + case MIPI_DSI_FMT_RGB666: + return 24; + + case MIPI_DSI_FMT_RGB666_PACKED: + return 18; + + case MIPI_DSI_FMT_RGB565: + return 16; + } + + return -EINVAL; +} + +struct mipi_dsi_device * +mipi_dsi_device_register_full(struct mipi_dsi_host *host, + const struct mipi_dsi_device_info *info); +void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi); +struct mipi_dsi_device * +devm_mipi_dsi_device_register_full(struct device *dev, struct mipi_dsi_host *host, + const struct mipi_dsi_device_info *info); +struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); +int mipi_dsi_attach(struct mipi_dsi_device *dsi); +int mipi_dsi_detach(struct mipi_dsi_device *dsi); +int devm_mipi_dsi_attach(struct device *dev, struct mipi_dsi_device *dsi); +int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, + u16 value); +ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable); +ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, + const struct drm_dsc_picture_parameter_set *pps); + +ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, + size_t size); +ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, + size_t num_params, void *data, size_t size); + +/** + * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode + * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking + * information only + * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both + * V-Blanking and H-Blanking information + */ +enum mipi_dsi_dcs_tear_mode { + MIPI_DSI_DCS_TEAR_MODE_VBLANK, + MIPI_DSI_DCS_TEAR_MODE_VHBLANK, +}; + +#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2) +#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3) +#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4) +#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5) +#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6) + +ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, + size_t len); +int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode); +int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format); +int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_page_address(struct 
mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, + enum mipi_dsi_dcs_tear_mode mode); +int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format); +int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline); +int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi, + u16 brightness); +int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, + u16 *brightness); +int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi, + u16 brightness); +int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, + u16 *brightness); + +/** + * mipi_dsi_dcs_write_seq - transmit a DCS command with payload + * @dsi: DSI peripheral device + * @cmd: Command + * @seq: buffer containing data to be transmitted + */ +#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) do { \ + static const u8 d[] = { cmd, seq }; \ + struct device *dev = &dsi->dev; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) { \ + dev_err_ratelimited(dev, "sending command %#02x failed: %d\n", cmd, ret); \ + return ret; \ + } \ + } while (0) + +/** + * struct mipi_dsi_driver - DSI driver + * @driver: device driver model driver + * @probe: callback for device binding + * @remove: callback for device unbinding + * @shutdown: called at shutdown time to quiesce the device + */ +struct mipi_dsi_driver { + struct device_driver driver; + int(*probe)(struct mipi_dsi_device *dsi); + void (*remove)(struct mipi_dsi_device *dsi); + void (*shutdown)(struct mipi_dsi_device *dsi); +}; + +static inline struct mipi_dsi_driver * +to_mipi_dsi_driver(struct device_driver *driver) +{ + return container_of(driver, struct mipi_dsi_driver, driver); +} + +static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi) +{ + return dev_get_drvdata(&dsi->dev); +} + +static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data) +{ + dev_set_drvdata(&dsi->dev, data); +} + +int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver, + struct module *owner); +void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver); + +#define mipi_dsi_driver_register(driver) \ + mipi_dsi_driver_register_full(driver, THIS_MODULE) + +#define module_mipi_dsi_driver(__mipi_dsi_driver) \ + module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \ + mipi_dsi_driver_unregister) + +#endif /* __DRM_MIPI_DSI__ */ diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h new file mode 100644 index 000000000..ac33ba1b1 --- /dev/null +++ b/include/drm/drm_mm.h @@ -0,0 +1,553 @@ +/************************************************************************** + * + * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. + * Copyright 2016 Intel Corporation + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Authors: + * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> + */ + +#ifndef _DRM_MM_H_ +#define _DRM_MM_H_ + +/* + * Generic range manager structs + */ +#include <linux/bug.h> +#include <linux/rbtree.h> +#include <linux/limits.h> +#include <linux/mm_types.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#ifdef CONFIG_DRM_DEBUG_MM +#include <linux/stackdepot.h> +#endif +#include <linux/types.h> + +#include <drm/drm_print.h> + +#ifdef CONFIG_DRM_DEBUG_MM +#define DRM_MM_BUG_ON(expr) BUG_ON(expr) +#else +#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif + +/** + * enum drm_mm_insert_mode - control search and allocation behaviour + * + * The &struct drm_mm range manager supports finding a suitable modes using + * a number of search trees. These trees are oranised by size, by address and + * in most recent eviction order. This allows the user to find either the + * smallest hole to reuse, the lowest or highest address to reuse, or simply + * reuse the most recent eviction that fits. When allocating the &drm_mm_node + * from within the hole, the &drm_mm_insert_mode also dictate whether to + * allocate the lowest matching address or the highest. + */ +enum drm_mm_insert_mode { + /** + * @DRM_MM_INSERT_BEST: + * + * Search for the smallest hole (within the search range) that fits + * the desired node. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_BEST = 0, + + /** + * @DRM_MM_INSERT_LOW: + * + * Search for the lowest hole (address closest to 0, within the search + * range) that fits the desired node. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_LOW, + + /** + * @DRM_MM_INSERT_HIGH: + * + * Search for the highest hole (address closest to U64_MAX, within the + * search range) that fits the desired node. + * + * Allocates the node from the *top* of the found hole. The specified + * alignment for the node is applied to the base of the node + * (&drm_mm_node.start). + */ + DRM_MM_INSERT_HIGH, + + /** + * @DRM_MM_INSERT_EVICT: + * + * Search for the most recently evicted hole (within the search range) + * that fits the desired node. This is appropriate for use immediately + * after performing an eviction scan (see drm_mm_scan_init()) and + * removing the selected nodes to form a hole. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_EVICT, + + /** + * @DRM_MM_INSERT_ONCE: + * + * Only check the first hole for suitablity and report -ENOSPC + * immediately otherwise, rather than check every hole until a + * suitable one is found. Can only be used in conjunction with another + * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW. + */ + DRM_MM_INSERT_ONCE = BIT(31), + + /** + * @DRM_MM_INSERT_HIGHEST: + * + * Only check the highest hole (the hole with the largest address) and + * insert the node at the top of the hole or report -ENOSPC if + * unsuitable. 
+ * + * Does not search all holes. + */ + DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE, + + /** + * @DRM_MM_INSERT_LOWEST: + * + * Only check the lowest hole (the hole with the smallest address) and + * insert the node at the bottom of the hole or report -ENOSPC if + * unsuitable. + * + * Does not search all holes. + */ + DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE, +}; + +/** + * struct drm_mm_node - allocated block in the DRM allocator + * + * This represents an allocated block in a &drm_mm allocator. Except for + * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is + * entirely opaque and should only be accessed through the provided funcions. + * Since allocation of these nodes is entirely handled by the driver they can be + * embedded. + */ +struct drm_mm_node { + /** @color: Opaque driver-private tag. */ + unsigned long color; + /** @start: Start address of the allocated block. */ + u64 start; + /** @size: Size of the allocated block. */ + u64 size; + /* private: */ + struct drm_mm *mm; + struct list_head node_list; + struct list_head hole_stack; + struct rb_node rb; + struct rb_node rb_hole_size; + struct rb_node rb_hole_addr; + u64 __subtree_last; + u64 hole_size; + u64 subtree_max_hole; + unsigned long flags; +#define DRM_MM_NODE_ALLOCATED_BIT 0 +#define DRM_MM_NODE_SCANNED_BIT 1 +#ifdef CONFIG_DRM_DEBUG_MM + depot_stack_handle_t stack; +#endif +}; + +/** + * struct drm_mm - DRM allocator + * + * DRM range allocator with a few special functions and features geared towards + * managing GPU memory. Except for the @color_adjust callback the structure is + * entirely opaque and should only be accessed through the provided functions + * and macros. This structure can be embedded into larger driver structures. + */ +struct drm_mm { + /** + * @color_adjust: + * + * Optional driver callback to further apply restrictions on a hole. The + * node argument points at the node containing the hole from which the + * block would be allocated (see drm_mm_hole_follows() and friends). The + * other arguments are the size of the block to be allocated. The driver + * can adjust the start and end as needed to e.g. insert guard pages. + */ + void (*color_adjust)(const struct drm_mm_node *node, + unsigned long color, + u64 *start, u64 *end); + + /* private: */ + /* List of all memory nodes that immediately precede a free hole. */ + struct list_head hole_stack; + /* head_node.node_list is the list of all memory nodes, ordered + * according to the (increasing) start address of the memory node. */ + struct drm_mm_node head_node; + /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ + struct rb_root_cached interval_tree; + struct rb_root_cached holes_size; + struct rb_root holes_addr; + + unsigned long scan_active; +}; + +/** + * struct drm_mm_scan - DRM allocator eviction roaster data + * + * This structure tracks data needed for the eviction roaster set up using + * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and + * drm_mm_scan_remove_block(). The structure is entirely opaque and should only + * be accessed through the provided functions and macros. It is meant to be + * allocated temporarily by the driver on the stack. 
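+ *
+ * A condensed eviction sketch, assuming a hypothetical LRU list of objects
+ * that embed a &drm_mm_node and a my_evict() helper (none of these names are
+ * part of this header; error handling is omitted)::
+ *
+ *	struct drm_mm_scan scan;
+ *	struct my_obj *obj, *next;
+ *	LIST_HEAD(eviction_list);
+ *
+ *	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_BEST);
+ *	list_for_each_entry(obj, &lru, lru_link) {
+ *		list_add(&obj->evict_link, &eviction_list);
+ *		if (drm_mm_scan_add_block(&scan, &obj->node))
+ *			break;
+ *	}
+ *
+ *	list_for_each_entry_safe(obj, next, &eviction_list, evict_link) {
+ *		if (drm_mm_scan_remove_block(&scan, &obj->node))
+ *			my_evict(obj);
+ *		list_del(&obj->evict_link);
+ *	}
+ *
+ * Every node added to the scan must be removed again before the freed-up
+ * hole can be used, typically with DRM_MM_INSERT_EVICT.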
+ */ +struct drm_mm_scan { + /* private: */ + struct drm_mm *mm; + + u64 size; + u64 alignment; + u64 remainder_mask; + + u64 range_start; + u64 range_end; + + u64 hit_start; + u64 hit_end; + + unsigned long color; + enum drm_mm_insert_mode mode; +}; + +/** + * drm_mm_node_allocated - checks whether a node is allocated + * @node: drm_mm_node to check + * + * Drivers are required to clear a node prior to using it with the + * drm_mm range manager. + * + * Drivers should use this helper for proper encapsulation of drm_mm + * internals. + * + * Returns: + * True if the @node is allocated. + */ +static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) +{ + return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); +} + +/** + * drm_mm_initialized - checks whether an allocator is initialized + * @mm: drm_mm to check + * + * Drivers should clear the struct drm_mm prior to initialisation if they + * want to use this function. + * + * Drivers should use this helper for proper encapsulation of drm_mm + * internals. + * + * Returns: + * True if the @mm is initialized. + */ +static inline bool drm_mm_initialized(const struct drm_mm *mm) +{ + return READ_ONCE(mm->hole_stack.next); +} + +/** + * drm_mm_hole_follows - checks whether a hole follows this node + * @node: drm_mm_node to check + * + * Holes are embedded into the drm_mm using the tail of a drm_mm_node. + * If you wish to know whether a hole follows this particular node, + * query this function. See also drm_mm_hole_node_start() and + * drm_mm_hole_node_end(). + * + * Returns: + * True if a hole follows the @node. + */ +static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) +{ + return node->hole_size; +} + +static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) +{ + return hole_node->start + hole_node->size; +} + +/** + * drm_mm_hole_node_start - computes the start of the hole following @node + * @hole_node: drm_mm_node which implicitly tracks the following hole + * + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows() + * + * Returns: + * Start of the subsequent hole. + */ +static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) +{ + DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); + return __drm_mm_hole_node_start(hole_node); +} + +static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) +{ + return list_next_entry(hole_node, node_list)->start; +} + +/** + * drm_mm_hole_node_end - computes the end of the hole following @node + * @hole_node: drm_mm_node which implicitly tracks the following hole + * + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows(). + * + * Returns: + * End of the subsequent hole. + */ +static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) +{ + return __drm_mm_hole_node_end(hole_node); +} + +/** + * drm_mm_nodes - list of nodes under the drm_mm range manager + * @mm: the struct drm_mm range manager + * + * As the drm_mm range manager hides its node_list deep with its + * structure, extracting it looks painful and repetitive. This is + * not expected to be used outside of the drm_mm_for_each_node() + * macros and similar internal functions. + * + * Returns: + * The node list, may be empty. 
+ */ +#define drm_mm_nodes(mm) (&(mm)->head_node.node_list) + +/** + * drm_mm_for_each_node - iterator to walk over all allocated nodes + * @entry: &struct drm_mm_node to assign to in each iteration step + * @mm: &drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each(), so not save against removal of elements. + */ +#define drm_mm_for_each_node(entry, mm) \ + list_for_each_entry(entry, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes + * @entry: &struct drm_mm_node to assign to in each iteration step + * @next: &struct drm_mm_node to store the next step + * @mm: &drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each_safe(), so save against removal of elements. + */ +#define drm_mm_for_each_node_safe(entry, next, mm) \ + list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_hole - iterator to walk over all holes + * @pos: &drm_mm_node used internally to track progress + * @mm: &drm_mm allocator to walk + * @hole_start: ulong variable to assign the hole start to on each iteration + * @hole_end: ulong variable to assign the hole end to on each iteration + * + * This iterator walks over all holes in the range allocator. It is implemented + * with list_for_each(), so not save against removal of elements. @entry is used + * internally and will not reflect a real drm_mm_node for the very first hole. + * Hence users of this iterator may not access it. + * + * Implementation Note: + * We need to inline list_for_each_entry in order to be able to set hole_start + * and hole_end on each iteration while keeping the macro sane. + */ +#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \ + for (pos = list_first_entry(&(mm)->hole_stack, \ + typeof(*pos), hole_stack); \ + &pos->hole_stack != &(mm)->hole_stack ? \ + hole_start = drm_mm_hole_node_start(pos), \ + hole_end = hole_start + pos->hole_size, \ + 1 : 0; \ + pos = list_next_entry(pos, hole_stack)) + +/* + * Basic range manager support (drm_mm.c) + */ +int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); +int drm_mm_insert_node_in_range(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size, + u64 alignment, + unsigned long color, + u64 start, + u64 end, + enum drm_mm_insert_mode mode); + +/** + * drm_mm_insert_node_generic - search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocate node to insert + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for this node + * @mode: fine-tune the allocation search and placement + * + * This is a simplified version of drm_mm_insert_node_in_range() with no + * range restrictions applied. + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. 
+ */ +static inline int +drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, + u64 size, u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) +{ + return drm_mm_insert_node_in_range(mm, node, + size, alignment, color, + 0, U64_MAX, mode); +} + +/** + * drm_mm_insert_node - search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocate node to insert + * @size: size of the allocation + * + * This is a simplified version of drm_mm_insert_node_generic() with @color set + * to 0. + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. + */ +static inline int drm_mm_insert_node(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size) +{ + return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); +} + +void drm_mm_remove_node(struct drm_mm_node *node); +void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); +void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); +void drm_mm_takedown(struct drm_mm *mm); + +/** + * drm_mm_clean - checks whether an allocator is clean + * @mm: drm_mm allocator to check + * + * Returns: + * True if the allocator is completely free, false if there's still a node + * allocated in it. + */ +static inline bool drm_mm_clean(const struct drm_mm *mm) +{ + return list_empty(drm_mm_nodes(mm)); +} + +struct drm_mm_node * +__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); + +/** + * drm_mm_for_each_node_in_range - iterator to walk over a range of + * allocated nodes + * @node__: drm_mm_node structure to assign to in each iteration step + * @mm__: drm_mm allocator to walk + * @start__: starting offset, the first node will overlap this + * @end__: ending offset, the last node will start before this (but may overlap) + * + * This iterator walks over all nodes in the range allocator that lie + * between @start and @end. It is implemented similarly to list_for_each(), + * but using the internal interval tree to accelerate the search for the + * starting node, and so not safe against removal of elements. It assumes + * that @end is within (or is the upper limit of) the drm_mm allocator. + * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk + * over the special _unallocated_ &drm_mm.head_node, and may even continue + * indefinitely. + */ +#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \ + for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \ + node__->start < (end__); \ + node__ = list_next_entry(node__, node_list)) + +void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, u64 alignment, unsigned long color, + u64 start, u64 end, + enum drm_mm_insert_mode mode); + +/** + * drm_mm_scan_init - initialize lru scanning + * @scan: scan state + * @mm: drm_mm to scan + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for the allocation + * @mode: fine-tune the allocation search and placement + * + * This is a simplified version of drm_mm_scan_init_with_range() with no range + * restrictions applied. + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. + * + * Warning: + * As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. 
+ */ +static inline void drm_mm_scan_init(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, + u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) +{ + drm_mm_scan_init_with_range(scan, mm, + size, alignment, color, + 0, U64_MAX, mode); +} + +bool drm_mm_scan_add_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); + +void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p); + +#endif diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h new file mode 100644 index 000000000..6b5e01295 --- /dev/null +++ b/include/drm/drm_mode_config.h @@ -0,0 +1,979 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_MODE_CONFIG_H__ +#define __DRM_MODE_CONFIG_H__ + +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/idr.h> +#include <linux/workqueue.h> +#include <linux/llist.h> + +#include <drm/drm_modeset_lock.h> + +struct drm_file; +struct drm_device; +struct drm_atomic_state; +struct drm_mode_fb_cmd2; +struct drm_format_info; +struct drm_display_mode; + +/** + * struct drm_mode_config_funcs - basic driver provided mode setting functions + * + * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that + * involve drivers. + */ +struct drm_mode_config_funcs { + /** + * @fb_create: + * + * Create a new framebuffer object. The core does basic checks on the + * requested metadata, but most of that is left to the driver. See + * &struct drm_mode_fb_cmd2 for details. + * + * To validate the pixel format and modifier drivers can use + * drm_any_plane_has_format() to make sure at least one plane supports + * the requested values. Note that the driver must first determine the + * actual modifier used if the request doesn't have it specified, + * ie. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0. + * + * IMPORTANT: These implied modifiers for legacy userspace must be + * stored in struct &drm_framebuffer, including all relevant metadata + * like &drm_framebuffer.pitches and &drm_framebuffer.offsets if the + * modifier enables additional planes beyond the fourcc pixel format + * code. This is required by the GETFB2 ioctl. 
+ * + * If the parameters are deemed valid and the backing storage objects in + * the underlying memory manager all exist, then the driver allocates + * a new &drm_framebuffer structure, subclassed to contain + * driver-specific information (like the internal native buffer object + * references). It also needs to fill out all relevant metadata, which + * should be done by calling drm_helper_mode_fill_fb_struct(). + * + * The initialization is finalized by calling drm_framebuffer_init(), + * which registers the framebuffer and makes it accessible to other + * threads. + * + * RETURNS: + * + * A new framebuffer with an initial reference count of 1 or a negative + * error code encoded with ERR_PTR(). + */ + struct drm_framebuffer *(*fb_create)(struct drm_device *dev, + struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @get_format_info: + * + * Allows a driver to return custom format information for special + * fb layouts (eg. ones with auxiliary compression control planes). + * + * RETURNS: + * + * The format information specific to the given fb metadata, or + * NULL if none is found. + */ + const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @output_poll_changed: + * + * Callback used by helpers to inform the driver of output configuration + * changes. + * + * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event() + * to call this hook to inform the fbdev helper of output changes. + * + * This hook is deprecated, drivers should instead use + * drm_fbdev_generic_setup() which takes care of any necessary + * hotplug event forwarding already without further involvement by + * the driver. + */ + void (*output_poll_changed)(struct drm_device *dev); + + /** + * @mode_valid: + * + * Device specific validation of display modes. Can be used to reject + * modes that can never be supported. Only device wide constraints can + * be checked here. crtc/encoder/bridge/connector specific constraints + * should be checked in the .mode_valid() hook for each specific object. + */ + enum drm_mode_status (*mode_valid)(struct drm_device *dev, + const struct drm_display_mode *mode); + + /** + * @atomic_check: + * + * This is the only hook to validate an atomic modeset update. This + * function must reject any modeset and state changes which the hardware + * or driver doesn't support. This includes but is of course not limited + * to: + * + * - Checking that the modes, framebuffers, scaling and placement + * requirements and so on are within the limits of the hardware. + * + * - Checking that any hidden shared resources are not oversubscribed. + * This can be shared PLLs, shared lanes, overall memory bandwidth, + * display fifo space (where shared between planes or maybe even + * CRTCs). + * + * - Checking that virtualized resources exported to userspace are not + * oversubscribed. For various reasons it can make sense to expose + * more planes, crtcs or encoders than which are physically there. One + * example is dual-pipe operations (which generally should be hidden + * from userspace if when lockstepped in hardware, exposed otherwise), + * where a plane might need 1 hardware plane (if it's just on one + * pipe), 2 hardware planes (when it spans both pipes) or maybe even + * shared a hardware plane with a 2nd plane (if there's a compatible + * plane requested on the area handled by the other pipe). 
+ * + * - Check that any transitional state is possible and that if + * requested, the update can indeed be done in the vblank period + * without temporarily disabling some functions. + * + * - Check any other constraints the driver or hardware might have. + * + * - This callback also needs to correctly fill out the &drm_crtc_state + * in this update to make sure that drm_atomic_crtc_needs_modeset() + * reflects the nature of the possible update and returns true if and + * only if the update cannot be applied without tearing within one + * vblank on that CRTC. The core uses that information to reject + * updates which require a full modeset (i.e. blanking the screen, or + * at least pausing updates for a substantial amount of time) if + * userspace has disallowed that in its request. + * + * - The driver also does not need to repeat basic input validation + * like done for the corresponding legacy entry points. The core does + * that before calling this hook. + * + * See the documentation of @atomic_commit for an exhaustive list of + * error conditions which don't have to be checked at the in this + * callback. + * + * See the documentation for &struct drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_check(), or one of the exported sub-functions of + * it. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EINVAL, if any of the above constraints are violated. + * + * - -EDEADLK, when returned from an attempt to acquire an additional + * &drm_modeset_lock through drm_modeset_lock(). + * + * - -ENOMEM, if allocating additional state sub-structures failed due + * to lack of memory. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point all errors are + * treated equally. + */ + int (*atomic_check)(struct drm_device *dev, + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This is the only hook to commit an atomic modeset update. The core + * guarantees that @atomic_check has been called successfully before + * calling this function, and that nothing has been changed in the + * interim. + * + * See the documentation for &struct drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_commit(), or one of the exported sub-functions of + * it. + * + * Nonblocking commits (as indicated with the nonblock parameter) must + * do any preparatory work which might result in an unsuccessful commit + * in the context of this callback. The only exceptions are hardware + * errors resulting in -EIO. But even in that case the driver must + * ensure that the display pipe is at least running, to avoid + * compositors crashing when pageflips don't work. Anything else, + * specifically committing the update to the hardware, should be done + * without blocking the caller. For updates which do not require a + * modeset this must be guaranteed. + * + * The driver must wait for any pending rendering to the new + * framebuffers to complete before executing the flip. It should also + * wait for any pending rendering from other drivers if the underlying + * buffer is a shared dma-buf. 
Nonblocking commits must not wait for + * rendering in the context of this callback. + * + * An application can request to be notified when the atomic commit has + * completed. These events are per-CRTC and can be distinguished by the + * CRTC index supplied in &drm_event to userspace. + * + * The drm core will supply a &struct drm_event in each CRTC's + * &drm_crtc_state.event. See the documentation for + * &drm_crtc_state.event for more details about the precise semantics of + * this event. + * + * NOTE: + * + * Drivers are not allowed to shut down any display pipe successfully + * enabled through an atomic commit on their own. Doing so can result in + * compositors crashing if a page flip is suddenly rejected because the + * pipe is off. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EBUSY, if a nonblocking updated is requested and there is + * an earlier updated pending. Drivers are allowed to support a queue + * of outstanding updates, but currently no driver supports that. + * Note that drivers must wait for preceding updates to complete if a + * synchronous update is requested, they are not allowed to fail the + * commit in that case. + * + * - -ENOMEM, if the driver failed to allocate memory. Specifically + * this can happen when trying to pin framebuffers, which must only + * be done when committing the state. + * + * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate + * that the driver has run out of vram, iommu space or similar GPU + * address space needed for framebuffer. + * + * - -EIO, if the hardware completely died. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point of view all errors are + * treated equally. + * + * This list is exhaustive. Specifically this hook is not allowed to + * return -EINVAL (any invalid requests should be caught in + * @atomic_check) or -EDEADLK (this function must not acquire + * additional modeset locks). + */ + int (*atomic_commit)(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock); + + /** + * @atomic_state_alloc: + * + * This optional hook can be used by drivers that want to subclass struct + * &drm_atomic_state to be able to track their own driver-private global + * state easily. If this hook is implemented, drivers must also + * implement @atomic_state_clear and @atomic_state_free. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + * + * RETURNS: + * + * A new &drm_atomic_state on success or NULL on failure. + */ + struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); + + /** + * @atomic_state_clear: + * + * This hook must clear any driver private state duplicated into the + * passed-in &drm_atomic_state. This hook is called when the caller + * encountered a &drm_modeset_lock deadlock and needs to drop all + * already acquired locks as part of the deadlock avoidance dance + * implemented in drm_modeset_backoff(). + * + * Any duplicated state must be invalidated since a concurrent atomic + * update might change it, and the drm atomic interfaces always apply + * updates as relative changes to the current state. + * + * Drivers that implement this must call drm_atomic_state_default_clear() + * to clear common state. 
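+ *
+ * A minimal sketch of such a hook for a driver that subclasses the state;
+ * struct my_atomic_state, to_my_state() and the cached field are
+ * assumptions, not part of this documentation::
+ *
+ *	static void my_atomic_state_clear(struct drm_atomic_state *state)
+ *	{
+ *		struct my_atomic_state *my_state = to_my_state(state);
+ *
+ *		my_state->cached_pll_config_valid = false;
+ *		drm_atomic_state_default_clear(state);
+ *	}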
+ * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + */ + void (*atomic_state_clear)(struct drm_atomic_state *state); + + /** + * @atomic_state_free: + * + * This hook needs driver private resources and the &drm_atomic_state + * itself. Note that the core first calls drm_atomic_state_clear() to + * avoid code duplicate between the clear and free hooks. + * + * Drivers that implement this must call + * drm_atomic_state_default_release() to release common resources. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + */ + void (*atomic_state_free)(struct drm_atomic_state *state); +}; + +/** + * struct drm_mode_config - Mode configuration control structure + * @min_width: minimum fb pixel width on this device + * @min_height: minimum fb pixel height on this device + * @max_width: maximum fb pixel width on this device + * @max_height: maximum fb pixel height on this device + * @funcs: core driver provided mode setting functions + * @fb_base: base address of the framebuffer + * @poll_enabled: track polling support for this device + * @poll_running: track polling status for this device + * @delayed_event: track delayed poll uevent deliver for this device + * @output_poll_work: delayed work for polling in process context + * @preferred_depth: preferred RBG pixel depth, used by fb helpers + * @prefer_shadow: hint to userspace to prefer shadow-fb rendering + * @cursor_width: hint to userspace for max cursor width + * @cursor_height: hint to userspace for max cursor height + * @helper_private: mid-layer private data + * + * Core mode resource tracking structure. All CRTC, encoders, and connectors + * enumerated by the driver are added here, as are global properties. Some + * global restrictions are also here, e.g. dimension restrictions. + * + * Framebuffer sizes refer to the virtual screen that can be displayed by + * the CRTC. This can be different from the physical resolution programmed. + * The minimum width and height, stored in @min_width and @min_height, + * describe the smallest size of the framebuffer. It correlates to the + * minimum programmable resolution. + * The maximum width, stored in @max_width, is typically limited by the + * maximum pitch between two adjacent scanlines. The maximum height, stored + * in @max_height, is usually only limited by the amount of addressable video + * memory. For hardware that has no real maximum, drivers should pick a + * reasonable default. + * + * See also @DRM_SHADOW_PLANE_MAX_WIDTH and @DRM_SHADOW_PLANE_MAX_HEIGHT. + */ +struct drm_mode_config { + /** + * @mutex: + * + * This is the big scary modeset BKL which protects everything that + * isn't protect otherwise. Scope is unclear and fuzzy, try to remove + * anything from under its protection and move it into more well-scoped + * locks. + * + * The one important thing this protects is the use of @acquire_ctx. + */ + struct mutex mutex; + + /** + * @connection_mutex: + * + * This protects connector state and the connector to encoder to CRTC + * routing chain. + * + * For atomic drivers specifically this protects &drm_connector.state. + */ + struct drm_modeset_lock connection_mutex; + + /** + * @acquire_ctx: + * + * Global implicit acquire context used by atomic drivers for legacy + * IOCTLs. Deprecated, since implicit locking contexts make it + * impossible to use driver-private &struct drm_modeset_lock. Users of + * this must hold @mutex. 
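As a small illustration of @connection_mutex and the acquire-context machinery described here, the sketch below takes the connector-routing lock from a hypothetical driver-internal helper and handles the -EDEADLK backoff; foo_inspect_routing is made up and error handling is trimmed:

#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

static void foo_inspect_routing(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);	/* drop contended locks, then try again */
		goto retry;
	}

	/* connector -> encoder -> CRTC routing is stable while the lock is held */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}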
+ */ + struct drm_modeset_acquire_ctx *acquire_ctx; + + /** + * @idr_mutex: + * + * Mutex for KMS ID allocation and management. Protects both @object_idr + * and @tile_idr. + */ + struct mutex idr_mutex; + + /** + * @object_idr: + * + * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc, + * connector, modes - just makes life easier to have only one. + */ + struct idr object_idr; + + /** + * @tile_idr: + * + * Use this idr for allocating new IDs for tiled sinks like use in some + * high-res DP MST screens. + */ + struct idr tile_idr; + + /** @fb_lock: Mutex to protect fb the global @fb_list and @num_fb. */ + struct mutex fb_lock; + /** @num_fb: Number of entries on @fb_list. */ + int num_fb; + /** @fb_list: List of all &struct drm_framebuffer. */ + struct list_head fb_list; + + /** + * @connector_list_lock: Protects @num_connector and + * @connector_list and @connector_free_list. + */ + spinlock_t connector_list_lock; + /** + * @num_connector: Number of connectors on this device. Protected by + * @connector_list_lock. + */ + int num_connector; + /** + * @connector_ida: ID allocator for connector indices. + */ + struct ida connector_ida; + /** + * @connector_list: + * + * List of connector objects linked with &drm_connector.head. Protected + * by @connector_list_lock. Only use drm_for_each_connector_iter() and + * &struct drm_connector_list_iter to walk this list. + */ + struct list_head connector_list; + /** + * @connector_free_list: + * + * List of connector objects linked with &drm_connector.free_head. + * Protected by @connector_list_lock. Used by + * drm_for_each_connector_iter() and + * &struct drm_connector_list_iter to savely free connectors using + * @connector_free_work. + */ + struct llist_head connector_free_list; + /** + * @connector_free_work: Work to clean up @connector_free_list. + */ + struct work_struct connector_free_work; + + /** + * @num_encoder: + * + * Number of encoders on this device. This is invariant over the + * lifetime of a device and hence doesn't need any locks. + */ + int num_encoder; + /** + * @encoder_list: + * + * List of encoder objects linked with &drm_encoder.head. This is + * invariant over the lifetime of a device and hence doesn't need any + * locks. + */ + struct list_head encoder_list; + + /** + * @num_total_plane: + * + * Number of universal (i.e. with primary/curso) planes on this device. + * This is invariant over the lifetime of a device and hence doesn't + * need any locks. + */ + int num_total_plane; + /** + * @plane_list: + * + * List of plane objects linked with &drm_plane.head. This is invariant + * over the lifetime of a device and hence doesn't need any locks. + */ + struct list_head plane_list; + + /** + * @num_crtc: + * + * Number of CRTCs on this device linked with &drm_crtc.head. This is invariant over the lifetime + * of a device and hence doesn't need any locks. + */ + int num_crtc; + /** + * @crtc_list: + * + * List of CRTC objects linked with &drm_crtc.head. This is invariant + * over the lifetime of a device and hence doesn't need any locks. + */ + struct list_head crtc_list; + + /** + * @property_list: + * + * List of property type objects linked with &drm_property.head. This is + * invariant over the lifetime of a device and hence doesn't need any + * locks. + */ + struct list_head property_list; + + /** + * @privobj_list: + * + * List of private objects linked with &drm_private_obj.head. This is + * invariant over the lifetime of a device and hence doesn't need any + * locks. 
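Since @connector_list may only be walked with the dedicated iterator, a minimal sketch of such a walk is shown below; foo_log_connectors is a hypothetical helper:

#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>

static void foo_log_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* the connector reference is only guaranteed inside the loop body */
		drm_info(dev, "found connector %s\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);
}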
+ */ + struct list_head privobj_list; + + int min_width, min_height; + int max_width, max_height; + const struct drm_mode_config_funcs *funcs; + resource_size_t fb_base; + + /* output poll support */ + bool poll_enabled; + bool poll_running; + bool delayed_event; + struct delayed_work output_poll_work; + + /** + * @blob_lock: + * + * Mutex for blob property allocation and management, protects + * @property_blob_list and &drm_file.blobs. + */ + struct mutex blob_lock; + + /** + * @property_blob_list: + * + * List of all the blob property objects linked with + * &drm_property_blob.head. Protected by @blob_lock. + */ + struct list_head property_blob_list; + + /* pointers to standard properties */ + + /** + * @edid_property: Default connector property to hold the EDID of the + * currently connected sink, if any. + */ + struct drm_property *edid_property; + /** + * @dpms_property: Default connector property to control the + * connector's DPMS state. + */ + struct drm_property *dpms_property; + /** + * @path_property: Default connector property to hold the DP MST path + * for the port. + */ + struct drm_property *path_property; + /** + * @tile_property: Default connector property to store the tile + * position of a tiled screen, for sinks which need to be driven with + * multiple CRTCs. + */ + struct drm_property *tile_property; + /** + * @link_status_property: Default connector property for link status + * of a connector + */ + struct drm_property *link_status_property; + /** + * @plane_type_property: Default plane property to differentiate + * CURSOR, PRIMARY and OVERLAY legacy uses of planes. + */ + struct drm_property *plane_type_property; + /** + * @prop_src_x: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_x; + /** + * @prop_src_y: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_y; + /** + * @prop_src_w: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_w; + /** + * @prop_src_h: Default atomic plane property for the plane source + * position in the connected &drm_framebuffer. + */ + struct drm_property *prop_src_h; + /** + * @prop_crtc_x: Default atomic plane property for the plane destination + * position in the &drm_crtc is being shown on. + */ + struct drm_property *prop_crtc_x; + /** + * @prop_crtc_y: Default atomic plane property for the plane destination + * position in the &drm_crtc is being shown on. + */ + struct drm_property *prop_crtc_y; + /** + * @prop_crtc_w: Default atomic plane property for the plane destination + * position in the &drm_crtc is being shown on. + */ + struct drm_property *prop_crtc_w; + /** + * @prop_crtc_h: Default atomic plane property for the plane destination + * position in the &drm_crtc is being shown on. + */ + struct drm_property *prop_crtc_h; + /** + * @prop_fb_id: Default atomic plane property to specify the + * &drm_framebuffer. + */ + struct drm_property *prop_fb_id; + /** + * @prop_in_fence_fd: Sync File fd representing the incoming fences + * for a Plane. + */ + struct drm_property *prop_in_fence_fd; + /** + * @prop_out_fence_ptr: Sync File fd pointer representing the + * outgoing fences for a CRTC. Userspace should provide a pointer to a + * value of type s32, and then cast that pointer to u64. 
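The OUT_FENCE_PTR semantics above are easiest to see from the userspace side. A minimal libdrm sketch follows; the device fd, crtc_id and the property id are assumed to have been looked up already, and foo_commit_with_out_fence is a made-up helper:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int foo_commit_with_out_fence(int fd, uint32_t crtc_id,
				     uint32_t out_fence_ptr_prop_id)
{
	int32_t out_fence_fd = -1;
	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
	int ret;

	/* pass a pointer to an s32, cast to u64, exactly as described above */
	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop_id,
				 (uint64_t)(uintptr_t)&out_fence_fd);
	/* ... add the plane/CRTC properties for the actual update here ... */

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	if (ret)
		return ret;

	/* out_fence_fd now holds a sync_file fd that signals when the update hits the screen */
	return out_fence_fd;
}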
+ */ + struct drm_property *prop_out_fence_ptr; + /** + * @prop_crtc_id: Default atomic plane property to specify the + * &drm_crtc. + */ + struct drm_property *prop_crtc_id; + /** + * @prop_fb_damage_clips: Optional plane property to mark damaged + * regions on the plane in framebuffer coordinates of the framebuffer + * attached to the plane. + * + * The layout of blob data is simply an array of &drm_mode_rect. Unlike + * plane src coordinates, damage clips are not in 16.16 fixed point. + */ + struct drm_property *prop_fb_damage_clips; + /** + * @prop_active: Default atomic CRTC property to control the active + * state, which is the simplified implementation for DPMS in atomic + * drivers. + */ + struct drm_property *prop_active; + /** + * @prop_mode_id: Default atomic CRTC property to set the mode for a + * CRTC. A 0 mode implies that the CRTC is entirely disabled - all + * connectors must be of and active must be set to disabled, too. + */ + struct drm_property *prop_mode_id; + /** + * @prop_vrr_enabled: Default atomic CRTC property to indicate + * whether variable refresh rate should be enabled on the CRTC. + */ + struct drm_property *prop_vrr_enabled; + + /** + * @dvi_i_subconnector_property: Optional DVI-I property to + * differentiate between analog or digital mode. + */ + struct drm_property *dvi_i_subconnector_property; + /** + * @dvi_i_select_subconnector_property: Optional DVI-I property to + * select between analog or digital mode. + */ + struct drm_property *dvi_i_select_subconnector_property; + + /** + * @dp_subconnector_property: Optional DP property to differentiate + * between different DP downstream port types. + */ + struct drm_property *dp_subconnector_property; + + /** + * @tv_subconnector_property: Optional TV property to differentiate + * between different TV connector types. + */ + struct drm_property *tv_subconnector_property; + /** + * @tv_select_subconnector_property: Optional TV property to select + * between different TV connector types. + */ + struct drm_property *tv_select_subconnector_property; + /** + * @tv_mode_property: Optional TV property to select + * the output TV mode. + */ + struct drm_property *tv_mode_property; + /** + * @tv_left_margin_property: Optional TV property to set the left + * margin (expressed in pixels). + */ + struct drm_property *tv_left_margin_property; + /** + * @tv_right_margin_property: Optional TV property to set the right + * margin (expressed in pixels). + */ + struct drm_property *tv_right_margin_property; + /** + * @tv_top_margin_property: Optional TV property to set the right + * margin (expressed in pixels). + */ + struct drm_property *tv_top_margin_property; + /** + * @tv_bottom_margin_property: Optional TV property to set the right + * margin (expressed in pixels). + */ + struct drm_property *tv_bottom_margin_property; + /** + * @tv_brightness_property: Optional TV property to set the + * brightness. + */ + struct drm_property *tv_brightness_property; + /** + * @tv_contrast_property: Optional TV property to set the + * contrast. + */ + struct drm_property *tv_contrast_property; + /** + * @tv_flicker_reduction_property: Optional TV property to control the + * flicker reduction mode. + */ + struct drm_property *tv_flicker_reduction_property; + /** + * @tv_overscan_property: Optional TV property to control the overscan + * setting. + */ + struct drm_property *tv_overscan_property; + /** + * @tv_saturation_property: Optional TV property to set the + * saturation. 
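The @prop_fb_damage_clips property described earlier in this structure is consumed with the damage helpers. A minimal sketch of a plane .atomic_update hook doing so (assuming the property was attached at init time with drm_plane_enable_fb_damage_clips()) might look like this, where all foo_* names are hypothetical:

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_rect.h>

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		/* upload only clip.x1..clip.x2 / clip.y1..clip.y2 to the hardware */
	}
}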
+ */ + struct drm_property *tv_saturation_property; + /** + * @tv_hue_property: Optional TV property to set the hue. + */ + struct drm_property *tv_hue_property; + + /** + * @scaling_mode_property: Optional connector property to control the + * upscaling, mostly used for built-in panels. + */ + struct drm_property *scaling_mode_property; + /** + * @aspect_ratio_property: Optional connector property to control the + * HDMI infoframe aspect ratio setting. + */ + struct drm_property *aspect_ratio_property; + /** + * @content_type_property: Optional connector property to control the + * HDMI infoframe content type setting. + */ + struct drm_property *content_type_property; + /** + * @degamma_lut_property: Optional CRTC property to set the LUT used to + * convert the framebuffer's colors to linear gamma. + */ + struct drm_property *degamma_lut_property; + /** + * @degamma_lut_size_property: Optional CRTC property for the size of + * the degamma LUT as supported by the driver (read-only). + */ + struct drm_property *degamma_lut_size_property; + /** + * @ctm_property: Optional CRTC property to set the + * matrix used to convert colors after the lookup in the + * degamma LUT. + */ + struct drm_property *ctm_property; + /** + * @gamma_lut_property: Optional CRTC property to set the LUT used to + * convert the colors, after the CTM matrix, to the gamma space of the + * connected screen. + */ + struct drm_property *gamma_lut_property; + /** + * @gamma_lut_size_property: Optional CRTC property for the size of the + * gamma LUT as supported by the driver (read-only). + */ + struct drm_property *gamma_lut_size_property; + + /** + * @suggested_x_property: Optional connector property with a hint for + * the position of the output on the host's screen. + */ + struct drm_property *suggested_x_property; + /** + * @suggested_y_property: Optional connector property with a hint for + * the position of the output on the host's screen. + */ + struct drm_property *suggested_y_property; + + /** + * @non_desktop_property: Optional connector property with a hint + * that device isn't a standard display, and the console/desktop, + * should not be displayed on it. + */ + struct drm_property *non_desktop_property; + + /** + * @panel_orientation_property: Optional connector property indicating + * how the lcd-panel is mounted inside the casing (e.g. normal or + * upside-down). + */ + struct drm_property *panel_orientation_property; + + /** + * @writeback_fb_id_property: Property for writeback connectors, storing + * the ID of the output framebuffer. + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_fb_id_property; + + /** + * @writeback_pixel_formats_property: Property for writeback connectors, + * storing an array of the supported pixel formats for the writeback + * engine (read-only). + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_pixel_formats_property; + /** + * @writeback_out_fence_ptr_property: Property for writeback connectors, + * fd pointer representing the outgoing fences for a writeback + * connector. Userspace should provide a pointer to a value of type s32, + * and then cast that pointer to u64. + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_out_fence_ptr_property; + + /** + * @hdr_output_metadata_property: Connector property containing hdr + * metatada. 
This will be provided by userspace compositors based + * on HDR content + */ + struct drm_property *hdr_output_metadata_property; + + /** + * @content_protection_property: DRM ENUM property for content + * protection. See drm_connector_attach_content_protection_property(). + */ + struct drm_property *content_protection_property; + + /** + * @hdcp_content_type_property: DRM ENUM property for type of + * Protected Content. + */ + struct drm_property *hdcp_content_type_property; + + /* dumb ioctl parameters */ + uint32_t preferred_depth, prefer_shadow; + + /** + * @prefer_shadow_fbdev: + * + * Hint to framebuffer emulation to prefer shadow-fb rendering. + */ + bool prefer_shadow_fbdev; + + /** + * @quirk_addfb_prefer_xbgr_30bpp: + * + * Special hack for legacy ADDFB to keep nouveau userspace happy. Should + * only ever be set by the nouveau kernel driver. + */ + bool quirk_addfb_prefer_xbgr_30bpp; + + /** + * @quirk_addfb_prefer_host_byte_order: + * + * When set to true drm_mode_addfb() will pick host byte order + * pixel_format when calling drm_mode_addfb2(). This is how + * drm_mode_addfb() should have worked from day one. It + * didn't though, so we ended up with quirks in both kernel + * and userspace drivers to deal with the broken behavior. + * Simply fixing drm_mode_addfb() unconditionally would break + * these drivers, so add a quirk bit here to allow drivers + * opt-in. + */ + bool quirk_addfb_prefer_host_byte_order; + + /** + * @async_page_flip: Does this device support async flips on the primary + * plane? + */ + bool async_page_flip; + + /** + * @fb_modifiers_not_supported: + * + * When this flag is set, the DRM device will not expose modifier + * support to userspace. This is only used by legacy drivers that infer + * the buffer layout through heuristics without using modifiers. New + * drivers shall not set fhis flag. + */ + bool fb_modifiers_not_supported; + + /** + * @normalize_zpos: + * + * If true the drm core will call drm_atomic_normalize_zpos() as part of + * atomic mode checking from drm_atomic_helper_check() + */ + bool normalize_zpos; + + /** + * @modifiers_property: Plane property to list support modifier/format + * combination. + */ + struct drm_property *modifiers_property; + + /* cursor size */ + uint32_t cursor_width, cursor_height; + + /** + * @suspend_state: + * + * Atomic state when suspended. + * Set by drm_mode_config_helper_suspend() and cleared by + * drm_mode_config_helper_resume(). + */ + struct drm_atomic_state *suspend_state; + + const struct drm_mode_config_helper_funcs *helper_private; +}; + +int __must_check drmm_mode_config_init(struct drm_device *dev); + +/** + * drm_mode_config_init - DRM mode_configuration structure initialization + * @dev: DRM device + * + * This is the unmanaged version of drmm_mode_config_init() for drivers which + * still explicitly call drm_mode_config_cleanup(). + * + * FIXME: This function is deprecated and drivers should be converted over to + * drmm_mode_config_init(). 
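Tying the pieces of this structure together, a hypothetical "foo" driver might set up its mode configuration as sketched below: the funcs table simply forwards to the atomic helpers, the 4096x4096 limits are made up, and the plane/CRTC/encoder/connector creation in the middle is elided:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,		/* assumes GEM-backed framebuffers */
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};

static int foo_modeset_init(struct drm_device *dev)
{
	int ret;

	ret = drmm_mode_config_init(dev);	/* managed: cleaned up on device release */
	if (ret)
		return ret;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;	/* hypothetical scanout limits */
	dev->mode_config.max_height = 4096;
	dev->mode_config.funcs = &foo_mode_config_funcs;

	/* ... create planes, CRTCs, encoders and connectors here ... */

	drm_mode_config_reset(dev);		/* initialise software state from hardware */
	return 0;
}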
+ */ +static inline int drm_mode_config_init(struct drm_device *dev) +{ + return drmm_mode_config_init(dev); +} + +void drm_mode_config_reset(struct drm_device *dev); +void drm_mode_config_cleanup(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h new file mode 100644 index 000000000..912f1e415 --- /dev/null +++ b/include/drm/drm_mode_object.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_MODESET_H__ +#define __DRM_MODESET_H__ + +#include <linux/kref.h> +#include <drm/drm_lease.h> +struct drm_object_properties; +struct drm_property; +struct drm_device; +struct drm_file; + +/** + * struct drm_mode_object - base structure for modeset objects + * @id: userspace visible identifier + * @type: type of the object, one of DRM_MODE_OBJECT\_\* + * @properties: properties attached to this object, including values + * @refcount: reference count for objects which with dynamic lifetime + * @free_cb: free function callback, only set for objects with dynamic lifetime + * + * Base structure for modeset objects visible to userspace. Objects can be + * looked up using drm_mode_object_find(). Besides basic uapi interface + * properties like @id and @type it provides two services: + * + * - It tracks attached properties and their values. This is used by &drm_crtc, + * &drm_plane and &drm_connector. Properties are attached by calling + * drm_object_attach_property() before the object is visible to userspace. + * + * - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it + * provides reference counting through drm_mode_object_get() and + * drm_mode_object_put(). This is used by &drm_framebuffer, &drm_connector + * and &drm_property_blob. These objects provide specialized reference + * counting wrappers. + */ +struct drm_mode_object { + uint32_t id; + uint32_t type; + struct drm_object_properties *properties; + struct kref refcount; + void (*free_cb)(struct kref *kref); +}; + +#define DRM_OBJECT_MAX_PROPERTY 24 +/** + * struct drm_object_properties - property tracking for &drm_mode_object + */ +struct drm_object_properties { + /** + * @count: number of valid properties, must be less than or equal to + * DRM_OBJECT_MAX_PROPERTY. + */ + + int count; + /** + * @properties: Array of pointers to &drm_property. 
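A short sketch of the attach-before-registration rule mentioned above: a hypothetical "sharpness" range property is created and hung off a connector's &drm_mode_object; the name, range and default value are all made up:

#include <drm/drm_connector.h>
#include <drm/drm_property.h>

static int foo_attach_sharpness_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_property *prop;

	prop = drm_property_create_range(dev, 0, "sharpness", 0, 100);
	if (!prop)
		return -ENOMEM;

	/* must happen before the connector becomes visible to userspace */
	drm_object_attach_property(&connector->base, prop, 50);
	return 0;
}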
+ * + * NOTE: if we ever start dynamically destroying properties (ie. + * not at drm_mode_config_cleanup() time), then we'd have to do + * a better job of detaching property from mode objects to avoid + * dangling property pointers: + */ + struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY]; + + /** + * @values: Array to store the property values, matching @properties. Do + * not read/write values directly, but use + * drm_object_property_get_value() and drm_object_property_set_value(). + * + * Note that atomic drivers do not store mutable properties in this + * array, but only the decoded values in the corresponding state + * structure. The decoding is done using the &drm_crtc.atomic_get_property and + * &drm_crtc.atomic_set_property hooks for &struct drm_crtc. For + * &struct drm_plane the hooks are &drm_plane_funcs.atomic_get_property and + * &drm_plane_funcs.atomic_set_property. And for &struct drm_connector + * the hooks are &drm_connector_funcs.atomic_get_property and + * &drm_connector_funcs.atomic_set_property . + * + * Hence atomic drivers should not use drm_object_property_set_value() + * and drm_object_property_get_value() on mutable objects, i.e. those + * without the DRM_MODE_PROP_IMMUTABLE flag set. + * + * For atomic drivers the default value of properties is stored in this + * array, so drm_object_property_get_default_value can be used to + * retrieve it. + */ + uint64_t values[DRM_OBJECT_MAX_PROPERTY]; +}; + +/* Avoid boilerplate. I'm tired of typing. */ +#define DRM_ENUM_NAME_FN(fnname, list) \ + const char *fnname(int val) \ + { \ + int i; \ + for (i = 0; i < ARRAY_SIZE(list); i++) { \ + if (list[i].type == val) \ + return list[i].name; \ + } \ + return "(unknown)"; \ + } + +struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id, uint32_t type); +void drm_mode_object_get(struct drm_mode_object *obj); +void drm_mode_object_put(struct drm_mode_object *obj); + +int drm_object_property_set_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t val); +int drm_object_property_get_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t *value); +int drm_object_property_get_default_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t *val); + +void drm_object_attach_property(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t init_val); + +bool drm_mode_object_lease_required(uint32_t type); +#endif diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h new file mode 100644 index 000000000..b0c680e6f --- /dev/null +++ b/include/drm/drm_modes.h @@ -0,0 +1,556 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * Copyright © 2014 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __DRM_MODES_H__ +#define __DRM_MODES_H__ + +#include <linux/hdmi.h> + +#include <drm/drm_mode_object.h> +#include <drm/drm_connector.h> + +struct videomode; + +/* + * Note on terminology: here, for brevity and convenience, we refer to connector + * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, + * DVI, etc. And 'screen' refers to the whole of the visible display, which + * may span multiple monitors (and therefore multiple CRTC and connector + * structures). + */ + +/** + * enum drm_mode_status - hardware support status of a mode + * @MODE_OK: Mode OK + * @MODE_HSYNC: hsync out of range + * @MODE_VSYNC: vsync out of range + * @MODE_H_ILLEGAL: mode has illegal horizontal timings + * @MODE_V_ILLEGAL: mode has illegal vertical timings + * @MODE_BAD_WIDTH: requires an unsupported linepitch + * @MODE_NOMODE: no mode with a matching name + * @MODE_NO_INTERLACE: interlaced mode not supported + * @MODE_NO_DBLESCAN: doublescan mode not supported + * @MODE_NO_VSCAN: multiscan mode not supported + * @MODE_MEM: insufficient video memory + * @MODE_VIRTUAL_X: mode width too large for specified virtual size + * @MODE_VIRTUAL_Y: mode height too large for specified virtual size + * @MODE_MEM_VIRT: insufficient video memory given virtual size + * @MODE_NOCLOCK: no fixed clock available + * @MODE_CLOCK_HIGH: clock required is too high + * @MODE_CLOCK_LOW: clock required is too low + * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange + * @MODE_BAD_HVALUE: horizontal timing was out of range + * @MODE_BAD_VVALUE: vertical timing was out of range + * @MODE_BAD_VSCAN: VScan value out of range + * @MODE_HSYNC_NARROW: horizontal sync too narrow + * @MODE_HSYNC_WIDE: horizontal sync too wide + * @MODE_HBLANK_NARROW: horizontal blanking too narrow + * @MODE_HBLANK_WIDE: horizontal blanking too wide + * @MODE_VSYNC_NARROW: vertical sync too narrow + * @MODE_VSYNC_WIDE: vertical sync too wide + * @MODE_VBLANK_NARROW: vertical blanking too narrow + * @MODE_VBLANK_WIDE: vertical blanking too wide + * @MODE_PANEL: exceeds panel dimensions + * @MODE_INTERLACE_WIDTH: width too large for interlaced mode + * @MODE_ONE_WIDTH: only one width is supported + * @MODE_ONE_HEIGHT: only one height is supported + * @MODE_ONE_SIZE: only one resolution is supported + * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking + * @MODE_NO_STEREO: stereo modes not supported + * @MODE_NO_420: ycbcr 420 modes not supported + * @MODE_STALE: mode has become stale + * @MODE_BAD: unspecified reason + * @MODE_ERROR: error condition + * + * This enum is used to filter out modes not supported by the driver/hardware + * combination. 
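These status codes are typically returned from a .mode_valid hook. A minimal sketch of a connector-level hook with made-up limits (1920x1080 scanout, 165 MHz pixel clock, no interlace) could look like this:

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

static enum drm_mode_status foo_connector_mode_valid(struct drm_connector *connector,
						     struct drm_display_mode *mode)
{
	if (mode->hdisplay > 1920 || mode->vdisplay > 1080)
		return MODE_BAD;		/* hypothetical scanout limit */
	if (mode->clock > 165000)		/* kHz */
		return MODE_CLOCK_HIGH;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		return MODE_NO_INTERLACE;

	return MODE_OK;
}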
+ */ +enum drm_mode_status { + MODE_OK = 0, + MODE_HSYNC, + MODE_VSYNC, + MODE_H_ILLEGAL, + MODE_V_ILLEGAL, + MODE_BAD_WIDTH, + MODE_NOMODE, + MODE_NO_INTERLACE, + MODE_NO_DBLESCAN, + MODE_NO_VSCAN, + MODE_MEM, + MODE_VIRTUAL_X, + MODE_VIRTUAL_Y, + MODE_MEM_VIRT, + MODE_NOCLOCK, + MODE_CLOCK_HIGH, + MODE_CLOCK_LOW, + MODE_CLOCK_RANGE, + MODE_BAD_HVALUE, + MODE_BAD_VVALUE, + MODE_BAD_VSCAN, + MODE_HSYNC_NARROW, + MODE_HSYNC_WIDE, + MODE_HBLANK_NARROW, + MODE_HBLANK_WIDE, + MODE_VSYNC_NARROW, + MODE_VSYNC_WIDE, + MODE_VBLANK_NARROW, + MODE_VBLANK_WIDE, + MODE_PANEL, + MODE_INTERLACE_WIDTH, + MODE_ONE_WIDTH, + MODE_ONE_HEIGHT, + MODE_ONE_SIZE, + MODE_NO_REDUCED, + MODE_NO_STEREO, + MODE_NO_420, + MODE_STALE = -3, + MODE_BAD = -2, + MODE_ERROR = -1 +}; + +#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ + .name = nm, .status = 0, .type = (t), .clock = (c), \ + .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ + .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ + .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ + .vscan = (vs), .flags = (f) + +/** + * DRM_MODE_RES_MM - Calculates the display size from resolution and DPI + * @res: The resolution in pixel + * @dpi: The number of dots per inch + */ +#define DRM_MODE_RES_MM(res, dpi) \ + (((res) * 254ul) / ((dpi) * 10ul)) + +#define __DRM_MODE_INIT(pix, hd, vd, hd_mm, vd_mm) \ + .type = DRM_MODE_TYPE_DRIVER, .clock = (pix), \ + .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \ + .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \ + .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \ + .height_mm = (vd_mm) + +/** + * DRM_MODE_INIT - Initialize display mode + * @hz: Vertical refresh rate in Hertz + * @hd: Horizontal resolution, width + * @vd: Vertical resolution, height + * @hd_mm: Display width in millimeters + * @vd_mm: Display height in millimeters + * + * This macro initializes a &drm_display_mode that contains information about + * refresh rate, resolution and physical size. + */ +#define DRM_MODE_INIT(hz, hd, vd, hd_mm, vd_mm) \ + __DRM_MODE_INIT((hd) * (vd) * (hz) / 1000 /* kHz */, hd, vd, hd_mm, vd_mm) + +/** + * DRM_SIMPLE_MODE - Simple display mode + * @hd: Horizontal resolution, width + * @vd: Vertical resolution, height + * @hd_mm: Display width in millimeters + * @vd_mm: Display height in millimeters + * + * This macro initializes a &drm_display_mode that only contains info about + * resolution and physical size. + */ +#define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \ + __DRM_MODE_INIT(1 /* pass validation */, hd, vd, hd_mm, vd_mm) + +#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ +#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ +#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */ +#define CRTC_NO_VSCAN (1 << 3) /* don't adjust doublescan */ +#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN) + +#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF + +#define DRM_MODE_MATCH_TIMINGS (1 << 0) +#define DRM_MODE_MATCH_CLOCK (1 << 1) +#define DRM_MODE_MATCH_FLAGS (1 << 2) +#define DRM_MODE_MATCH_3D_FLAGS (1 << 3) +#define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4) + +/** + * struct drm_display_mode - DRM kernel-internal display mode structure + * @hdisplay: horizontal display size + * @hsync_start: horizontal sync start + * @hsync_end: horizontal sync end + * @htotal: horizontal total size + * @hskew: horizontal skew?! 
+ * @vdisplay: vertical display size + * @vsync_start: vertical sync start + * @vsync_end: vertical sync end + * @vtotal: vertical total size + * @vscan: vertical scan?! + * @crtc_hdisplay: hardware mode horizontal display size + * @crtc_hblank_start: hardware mode horizontal blank start + * @crtc_hblank_end: hardware mode horizontal blank end + * @crtc_hsync_start: hardware mode horizontal sync start + * @crtc_hsync_end: hardware mode horizontal sync end + * @crtc_htotal: hardware mode horizontal total size + * @crtc_hskew: hardware mode horizontal skew?! + * @crtc_vdisplay: hardware mode vertical display size + * @crtc_vblank_start: hardware mode vertical blank start + * @crtc_vblank_end: hardware mode vertical blank end + * @crtc_vsync_start: hardware mode vertical sync start + * @crtc_vsync_end: hardware mode vertical sync end + * @crtc_vtotal: hardware mode vertical total size + * + * This is the kernel API display mode information structure. For the + * user-space version see struct drm_mode_modeinfo. + * + * The horizontal and vertical timings are defined per the following diagram. + * + * :: + * + * + * Active Front Sync Back + * Region Porch Porch + * <-----------------------><----------------><-------------><--------------> + * //////////////////////| + * ////////////////////// | + * ////////////////////// |.................. ................ + * _______________ + * <----- [hv]display -----> + * <------------- [hv]sync_start ------------> + * <--------------------- [hv]sync_end ---------------------> + * <-------------------------------- [hv]total ----------------------------->* + * + * This structure contains two copies of timings. First are the plain timings, + * which specify the logical mode, as it would be for a progressive 1:1 scanout + * at the refresh rate userspace can observe through vblank timestamps. Then + * there's the hardware timings, which are corrected for interlacing, + * double-clocking and similar things. They are provided as a convenience, and + * can be appropriately computed using drm_mode_set_crtcinfo(). + * + * For printing you can use %DRM_MODE_FMT and DRM_MODE_ARG(). + */ +struct drm_display_mode { + /** + * @clock: + * + * Pixel clock in kHz. + */ + int clock; /* in kHz */ + u16 hdisplay; + u16 hsync_start; + u16 hsync_end; + u16 htotal; + u16 hskew; + u16 vdisplay; + u16 vsync_start; + u16 vsync_end; + u16 vtotal; + u16 vscan; + /** + * @flags: + * + * Sync and timing flags: + * + * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. + * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. + * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. + * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. + * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. + * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. + * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. + * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. + * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. + * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). + * - DRM_MODE_FLAG_BCAST: <deprecated> + * - DRM_MODE_FLAG_PIXMUX: <deprecated> + * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. + * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. + * + * Additionally there's flags to specify how 3D modes are packed: + * + * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. + * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. + * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. + * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. 
+ * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. + * - DRM_MODE_FLAG_3D_L_DEPTH: ? + * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? + * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom + * parts. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and + * right parts. + */ + u32 flags; + + /** + * @crtc_clock: + * + * Actual pixel or dot clock in the hardware. This differs from the + * logical @clock when e.g. using interlacing, double-clocking, stereo + * modes or other fancy stuff that changes the timings and signals + * actually sent over the wire. + * + * This is again in kHz. + * + * Note that with digital outputs like HDMI or DP there's usually a + * massive confusion between the dot clock and the signal clock at the + * bit encoding level. Especially when a 8b/10b encoding is used and the + * difference is exactly a factor of 10. + */ + int crtc_clock; + u16 crtc_hdisplay; + u16 crtc_hblank_start; + u16 crtc_hblank_end; + u16 crtc_hsync_start; + u16 crtc_hsync_end; + u16 crtc_htotal; + u16 crtc_hskew; + u16 crtc_vdisplay; + u16 crtc_vblank_start; + u16 crtc_vblank_end; + u16 crtc_vsync_start; + u16 crtc_vsync_end; + u16 crtc_vtotal; + + /** + * @width_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + u16 width_mm; + + /** + * @height_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + u16 height_mm; + + /** + * @type: + * + * A bitmask of flags, mostly about the source of a mode. Possible flags + * are: + * + * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native + * resolution of an LCD panel. There should only be one preferred + * mode per connector at any given time. + * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of + * them really. Drivers must set this bit for all modes they create + * and expose to userspace. + * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel + * command line. + * + * Plus a big list of flags which shouldn't be used at all, but are + * still around since these flags are also used in the userspace ABI. + * We no longer accept modes with these types though: + * + * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused. + * Use DRM_MODE_TYPE_DRIVER instead. + * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use + * DRM_MODE_TYPE_PREFERRED instead. + * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers + * which are stuck around for hysterical raisins only. No one has an + * idea what they were meant for. Don't use. + */ + u8 type; + + /** + * @expose_to_userspace: + * + * Indicates whether the mode is to be exposed to the userspace. + * This is to maintain a set of exposed modes while preparing + * user-mode's list in drm_mode_getconnector ioctl. The purpose of + * this only lies in the ioctl function, and is not to be used + * outside the function. + */ + bool expose_to_userspace; + + /** + * @head: + * + * struct list_head for mode lists. + */ + struct list_head head; + + /** + * @name: + * + * Human-readable name of the mode, filled out with drm_mode_set_name(). + */ + char name[DRM_DISPLAY_MODE_LEN]; + + /** + * @status: + * + * Status of the mode, used to filter out modes not supported by the + * hardware. See enum &drm_mode_status. + */ + enum drm_mode_status status; + + /** + * @picture_aspect_ratio: + * + * Field for setting the HDMI picture aspect ratio of a mode. 
+ */ + enum hdmi_picture_aspect picture_aspect_ratio; + +}; + +/** + * DRM_MODE_FMT - printf string for &struct drm_display_mode + */ +#define DRM_MODE_FMT "\"%s\": %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" + +/** + * DRM_MODE_ARG - printf arguments for &struct drm_display_mode + * @m: display mode + */ +#define DRM_MODE_ARG(m) \ + (m)->name, drm_mode_vrefresh(m), (m)->clock, \ + (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ + (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ + (m)->type, (m)->flags + +#define obj_to_mode(x) container_of(x, struct drm_display_mode, base) + +/** + * drm_mode_is_stereo - check for stereo mode flags + * @mode: drm_display_mode to check + * + * Returns: + * True if the mode is one of the stereo modes (like side-by-side), false if + * not. + */ +static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) +{ + return mode->flags & DRM_MODE_FLAG_3D_MASK; +} + +struct drm_connector; +struct drm_cmdline_mode; + +struct drm_display_mode *drm_mode_create(struct drm_device *dev); +void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); +void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, + const struct drm_display_mode *in); +int drm_mode_convert_umode(struct drm_device *dev, + struct drm_display_mode *out, + const struct drm_mode_modeinfo *in); +void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); +void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); +bool drm_mode_is_420_only(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420_also(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420(const struct drm_display_info *display, + const struct drm_display_mode *mode); + +struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool reduced, bool interlaced, + bool margins); +struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool interlaced, int margins); +struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, + int hdisplay, int vdisplay, + int vrefresh, bool interlaced, + int margins, + int GTF_M, int GTF_2C, + int GTF_K, int GTF_2J); +void drm_display_mode_from_videomode(const struct videomode *vm, + struct drm_display_mode *dmode); +void drm_display_mode_to_videomode(const struct drm_display_mode *dmode, + struct videomode *vm); +void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags); + +#if defined(CONFIG_OF) +int of_get_drm_display_mode(struct device_node *np, + struct drm_display_mode *dmode, u32 *bus_flags, + int index); +int of_get_drm_panel_display_mode(struct device_node *np, + struct drm_display_mode *dmode, u32 *bus_flags); +#else +static inline int of_get_drm_display_mode(struct device_node *np, + struct drm_display_mode *dmode, + u32 *bus_flags, int index) +{ + return -EINVAL; +} + +static inline int of_get_drm_panel_display_mode(struct device_node *np, + struct drm_display_mode *dmode, u32 *bus_flags) +{ + return -EINVAL; +} +#endif + +void drm_mode_set_name(struct drm_display_mode *mode); +int drm_mode_vrefresh(const struct drm_display_mode *mode); +void drm_mode_get_hv_timing(const struct drm_display_mode *mode, + int *hdisplay, int *vdisplay); + +void drm_mode_set_crtcinfo(struct drm_display_mode *p, + int adjust_flags); +void drm_mode_copy(struct drm_display_mode *dst, + const 
struct drm_display_mode *src); +void drm_mode_init(struct drm_display_mode *dst, + const struct drm_display_mode *src); +struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, + const struct drm_display_mode *mode); +bool drm_mode_match(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2, + unsigned int match_flags); +bool drm_mode_equal(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); +bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); +bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); + +/* for use by the crtc helper probe functions */ +enum drm_mode_status drm_mode_validate_driver(struct drm_device *dev, + const struct drm_display_mode *mode); +enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, + int maxX, int maxY); +enum drm_mode_status +drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, + struct drm_connector *connector); +void drm_mode_prune_invalid(struct drm_device *dev, + struct list_head *mode_list, bool verbose); +void drm_mode_sort(struct list_head *mode_list); +void drm_connector_list_update(struct drm_connector *connector); + +/* parsing cmdline modes */ +bool +drm_mode_parse_command_line_for_connector(const char *mode_option, + const struct drm_connector *connector, + struct drm_cmdline_mode *mode); +struct drm_display_mode * +drm_mode_create_from_cmdline_mode(struct drm_device *dev, + struct drm_cmdline_mode *cmd); + +#endif /* __DRM_MODES_H__ */ diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h new file mode 100644 index 000000000..995fd981c --- /dev/null +++ b/include/drm/drm_modeset_helper.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
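Before moving on to the next header, a brief sketch of how the drm_modes.h interfaces above are commonly combined: a fixed mode built with DRM_MODE_INIT() is duplicated, named and added from a connector .get_modes hook. The 800x480 panel, its 154x86 mm size and the foo_* names are invented:

#include <drm/drm_modes.h>

static const struct drm_display_mode foo_panel_mode = {
	DRM_MODE_INIT(60, 800, 480, 154, 86),	/* 60 Hz refresh */
};

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &foo_panel_mode);
	if (!mode)
		return 0;

	mode->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	return 1;	/* number of modes added to the connector */
}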
+ */ + +#ifndef __DRM_KMS_HELPER_H__ +#define __DRM_KMS_HELPER_H__ + +struct drm_crtc; +struct drm_crtc_funcs; +struct drm_device; +struct drm_framebuffer; +struct drm_mode_fb_cmd2; + +void drm_helper_move_panel_connectors_to_head(struct drm_device *); + +void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, + const struct drm_mode_fb_cmd2 *mode_cmd); + +int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + const struct drm_crtc_funcs *funcs); + +int drm_mode_config_helper_suspend(struct drm_device *dev); +int drm_mode_config_helper_resume(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h new file mode 100644 index 000000000..fafa70ac1 --- /dev/null +++ b/include/drm/drm_modeset_helper_vtables.h @@ -0,0 +1,1427 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * Copyright © 2011-2013 Intel Corporation + * Copyright © 2015 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_MODESET_HELPER_VTABLES_H__ +#define __DRM_MODESET_HELPER_VTABLES_H__ + +#include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> + +/** + * DOC: overview + * + * The DRM mode setting helper functions are common code for drivers to use if + * they wish. Drivers are not forced to use this code in their + * implementations but it would be useful if the code they do use at least + * provides a consistent interface and operation to userspace. Therefore it is + * highly recommended to use the provided helpers as much as possible. + * + * Because there is only one pointer per modeset object to hold a vfunc table + * for helper libraries they are by necessity shared among the different + * helpers. + * + * To make this clear all the helper vtables are pulled together in this location here. + */ + +enum mode_set_atomic; +struct drm_writeback_connector; +struct drm_writeback_job; + +/** + * struct drm_crtc_helper_funcs - helper operations for CRTCs + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_crtc_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the CRTC. If the mode passed in + * is unsupported, the provider must use the next lowest power level. 
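Because each modeset object carries a single helper-vtable pointer, drivers fill one table per object and register it with the matching *_helper_add() call. A minimal sketch follows; the foo_crtc_* hooks are hypothetical, while the connector hooks could be the ones sketched earlier:

#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.mode_valid	= foo_crtc_mode_valid,		/* hypothetical CRTC hooks */
	.atomic_check	= foo_crtc_atomic_check,
	.atomic_flush	= foo_crtc_atomic_flush,
	.atomic_enable	= foo_crtc_atomic_enable,
	.atomic_disable	= foo_crtc_atomic_disable,
};

static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
	.get_modes	= foo_connector_get_modes,
	.mode_valid	= foo_connector_mode_valid,
};

static void foo_add_helpers(struct drm_crtc *crtc, struct drm_connector *connector)
{
	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
	drm_connector_helper_add(connector, &foo_connector_helper_funcs);
}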
+ * This is used by the legacy CRTC helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable a CRTC by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling a CRTC to + * facilitate transitions to atomic, but it is deprecated. Instead + * @atomic_enable and @atomic_disable should be used. + */ + void (*dpms)(struct drm_crtc *crtc, int mode); + + /** + * @prepare: + * + * This callback should prepare the CRTC for a subsequent modeset, which + * in practice means the driver should disable the CRTC if it is + * running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @atomic_disable + * should be used. + */ + void (*prepare)(struct drm_crtc *crtc); + + /** + * @commit: + * + * This callback should commit the new mode on the CRTC after a modeset, + * which in practice means the driver should enable the CRTC. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @atomic_enable + * should be used. + */ + void (*commit)(struct drm_crtc *crtc); + + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in this + * crtc. This should be implemented if the crtc has some sort of + * restriction in the modes it can display. For example, a given crtc + * may be responsible to set a clock value. If the clock can not + * produce all the values for the available modes then this callback + * can be used to restrict the number of modes to only the ones that + * can be displayed. + * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * This function is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardward constraints. Any further + * limits which depend upon the configuration can only be checked in + * @mode_fixup or @atomic_check. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc, + const struct drm_display_mode *mode); + + /** + * @mode_fixup: + * + * This callback is used to validate a mode. The parameter mode is the + * display mode that userspace requested, adjusted_mode is the mode the + * encoders need to be fed with. Note that this is the inverse semantics + * of the meaning for the &drm_encoder and &drm_bridge_funcs.mode_fixup + * vfunc. If the CRTC cannot support the requested conversion from mode + * to adjusted_mode it should reject the modeset. See also + * &drm_crtc_state.adjusted_mode for more details. + * + * This function is used by both legacy CRTC helpers and atomic helpers. 
+ * With atomic helpers it is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback, but note that they're not + * perfectly equivalent: @mode_valid is called from + * drm_atomic_helper_check_modeset(), but @atomic_check is called from + * drm_atomic_helper_check_planes(), because originally it was meant for + * plane update checks only. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any CRTC constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @mode_set: + * + * This callback is used by the legacy CRTC helpers to set a new mode, + * position and framebuffer. Since it ties the primary plane to every + * mode change it is incompatible with universal plane support. And + * since it can't update other planes it's incompatible with atomic + * modeset support. + * + * This callback is only used by CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_nofb: + * + * This callback is used to update the display mode of a CRTC without + * changing anything of the primary plane configuration. This fits the + * requirement of atomic and hence is used by the atomic helpers. It is + * also used by the transitional plane helpers to implement a + * @mode_set hook in drm_helper_crtc_mode_set(). + * + * Note that the display pipe is completely off when this function is + * called. Atomic drivers which need hardware to be running before they + * program the new display mode (e.g. because they implement runtime PM) + * should not use this hook. This is because the helper library calls + * this hook only once per mode change and not every time the display + * pipeline is suspended using either DPMS or the new "ACTIVE" property. + * Which means register values set in this callback might get reset when + * the CRTC is suspended, but not restored. Such drivers should instead + * move all their CRTC setup into the @atomic_enable callback. + * + * This callback is optional. + */ + void (*mode_set_nofb)(struct drm_crtc *crtc); + + /** + * @mode_set_base: + * + * This callback is used by the legacy CRTC helpers to set a new + * framebuffer and scanout position. It is optional and used as an + * optimized fast-path instead of a full mode set operation with all the + * resulting flickering. 
If it is not present
+ * drm_crtc_helper_set_config() will fall back to a full modeset, using
+ * the @mode_set callback. Since it can't update other planes it's
+ * incompatible with atomic modeset support.
+ *
+ * This callback is only used by the CRTC helpers and is deprecated.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+ /**
+ * @mode_set_base_atomic:
+ *
+ * This callback is used by the fbdev helpers to set a new framebuffer
+ * and scanout without sleeping, i.e. from an atomic calling context. It
+ * is only used to implement kgdb support.
+ *
+ * This callback is optional and only needed for kgdb support in the fbdev
+ * helpers.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*mode_set_base_atomic)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic);
+
+ /**
+ * @disable:
+ *
+ * This callback should be used to disable the CRTC. With the atomic
+ * drivers it is called after all encoders connected to this CRTC have
+ * been shut off already using their own
+ * &drm_encoder_helper_funcs.disable hook. If that sequence is too
+ * simple drivers can just add their own hooks and call it from this
+ * CRTC callback here by looping over all encoders connected to it using
+ * for_each_encoder_on_crtc().
+ *
+ * This hook is used both by legacy CRTC helpers and atomic helpers.
+ * Atomic drivers don't need to implement it if there's no need to
+ * disable anything at the CRTC level. To ensure that runtime PM
+ * handling (using either DPMS or the new "ACTIVE" property) works
+ * @disable must be the inverse of @atomic_enable for atomic drivers.
+ * Atomic drivers should consider using @atomic_disable instead of
+ * this one.
+ *
+ * NOTE:
+ *
+ * With legacy CRTC helpers there's a big semantic difference between
+ * @disable and other hooks (like @prepare or @dpms) used to shut down a
+ * CRTC: @disable is only called when also logically disabling the
+ * display pipeline and needs to release any resources acquired in
+ * @mode_set (like shared PLLs, or again release pinned framebuffers).
+ *
+ * Therefore @disable must be the inverse of @mode_set plus @commit for
+ * drivers still using legacy CRTC helpers, which is different from the
+ * rules under atomic.
+ */
+ void (*disable)(struct drm_crtc *crtc);
+
+ /**
+ * @atomic_check:
+ *
+ * Drivers should check plane-update related CRTC constraints in this
+ * hook. They can also check mode related limitations but need to be
+ * aware of the calling order, since this hook is used by
+ * drm_atomic_helper_check_planes() whereas the preparations needed to
+ * check output routing and the display mode are done in
+ * drm_atomic_helper_check_modeset(). Therefore drivers that want to
+ * check output routing and display mode constraints in this callback
+ * must ensure that drm_atomic_helper_check_modeset() has been called
+ * beforehand. This is the calling order used by the default helper
+ * implementation in drm_atomic_helper_check().
+ *
+ * When using drm_atomic_helper_check_planes() this hook is called
+ * after the &drm_plane_helper_funcs.atomic_check hook for planes, which
+ * allows drivers to assign shared resources requested by planes in this
+ * callback here. 
For more complicated dependencies the driver can call + * the provided check helpers multiple times until the computed state + * has a final configuration and everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state object passed-in. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any CRTC constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_crtc *crtc, + struct drm_atomic_state *state); + + /** + * @atomic_begin: + * + * Drivers should prepare for an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might be vblank + * evasion, blocking updates by setting bits or doing preparatory work + * for e.g. manual update display. + * + * This hook is called before any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_begin)(struct drm_crtc *crtc, + struct drm_atomic_state *state); + /** + * @atomic_flush: + * + * Drivers should finalize an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might include + * checking that vblank evasion was successful, unblocking updates by + * setting bits or setting the GO bit to flush out all updates. + * + * Simple hardware or hardware with special requirements can commit and + * flush out all updates for all planes from this hook and forgo all the + * other commit hooks for plane updates. + * + * This hook is called after any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. 
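+ *
+ * A minimal sketch of hardware that latches its shadow registers with
+ * a "GO" bit (all foo_*/FOO_* names below are made up):
+ *
+ * .. code-block:: c
+ *
+ *     static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
+ *                                       struct drm_atomic_state *state)
+ *     {
+ *             struct foo_crtc *fcrtc = to_foo_crtc(crtc);
+ *
+ *             // latch the shadow registers written by the plane hooks
+ *             writel(FOO_CTRL_GO, fcrtc->mmio + FOO_CTRL);
+ *     }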
+ */ + void (*atomic_flush)(struct drm_crtc *crtc, + struct drm_atomic_state *state); + + /** + * @atomic_enable: + * + * This callback should be used to enable the CRTC. With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own &drm_encoder_helper_funcs.enable + * hook. If that sequence is too simple drivers can just add their own + * hooks and call it from this CRTC callback here by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with + * @atomic_disable. Atomic drivers don't need to implement it if there's + * no need to enable anything at the CRTC level. To ensure that runtime + * PM handling (using either DPMS or the new "ACTIVE" property) works + * @atomic_enable must be the inverse of @atomic_disable for atomic + * drivers. + * + * This function is optional. + */ + void (*atomic_enable)(struct drm_crtc *crtc, + struct drm_atomic_state *state); + + /** + * @atomic_disable: + * + * This callback should be used to disable the CRTC. With the atomic + * drivers it is called after all encoders connected to this CRTC have + * been shut off already using their own + * &drm_encoder_helper_funcs.disable hook. If that sequence is too + * simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers. Atomic drivers don't + * need to implement it if there's no need to disable anything at the + * CRTC level. + * + * This function is optional. + */ + void (*atomic_disable)(struct drm_crtc *crtc, + struct drm_atomic_state *state); + + /** + * @get_scanout_position: + * + * Called by vblank timestamping code. + * + * Returns the current display scanout position from a CRTC and an + * optional accurate ktime_get() timestamp of when the position was + * measured. Note that this is a helper callback which is only used + * if a driver uses drm_crtc_vblank_helper_get_vblank_timestamp() + * for the @drm_crtc_funcs.get_vblank_timestamp callback. + * + * Parameters: + * + * crtc: + * The CRTC. + * in_vblank_irq: + * True when called from drm_crtc_handle_vblank(). Some drivers + * need to apply some workarounds for gpu-specific vblank irq + * quirks if the flag is set. + * vpos: + * Target location for current vertical scanout position. + * hpos: + * Target location for current horizontal scanout position. + * stime: + * Target location for timestamp taken immediately before + * scanout position query. Can be NULL to skip timestamp. + * etime: + * Target location for timestamp taken immediately after + * scanout position query. Can be NULL to skip timestamp. + * mode: + * Current display timings. + * + * Returns vpos as a positive number while in active scanout area. + * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." + * + * Returns: + * + * True on success, false if a reliable scanout position counter could + * not be read out. 
+ */
+ bool (*get_scanout_position)(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+};
+
+/**
+ * drm_crtc_helper_add - sets the helper vtable for a crtc
+ * @crtc: DRM CRTC
+ * @funcs: helper vtable to set for @crtc
+ */
+static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+ const struct drm_crtc_helper_funcs *funcs)
+{
+ crtc->helper_private = funcs;
+}
+
+/**
+ * struct drm_encoder_helper_funcs - helper operations for encoders
+ *
+ * These hooks are used by the legacy CRTC helpers, the transitional plane
+ * helpers and the new atomic modesetting helpers.
+ */
+struct drm_encoder_helper_funcs {
+ /**
+ * @dpms:
+ *
+ * Callback to control power levels on the encoder. If the mode passed in
+ * is unsupported, the provider must use the next lowest power level.
+ * This is used by the legacy encoder helpers to implement DPMS
+ * functionality in drm_helper_connector_dpms().
+ *
+ * This callback is also used to disable an encoder by calling it with
+ * DRM_MODE_DPMS_OFF if the @disable hook isn't used.
+ *
+ * This callback is used by the legacy CRTC helpers. Atomic helpers
+ * also support using this hook for enabling and disabling an encoder to
+ * facilitate transitions to atomic, but it is deprecated. Instead
+ * @enable and @disable should be used.
+ */
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+
+ /**
+ * @mode_valid:
+ *
+ * This callback is used to check if a specific mode is valid in this
+ * encoder. This should be implemented if the encoder has some sort
+ * of restriction in the modes it can display. For example, a given
+ * encoder may be responsible for setting a clock value. If the clock
+ * cannot produce all the values for the available modes then this callback
+ * can be used to restrict the number of modes to only the ones that
+ * can be displayed.
+ *
+ * This hook is used by the probe helpers to filter the mode list in
+ * drm_helper_probe_single_connector_modes(), and it is used by the
+ * atomic helpers to validate modes supplied by userspace in
+ * drm_atomic_helper_check_modeset().
+ *
+ * This function is optional.
+ *
+ * NOTE:
+ *
+ * Since this function is both called from the check phase of an atomic
+ * commit, and the mode validation in the probe paths it is not allowed
+ * to look at anything else but the passed-in mode, and validate it
+ * against configuration-invariant hardware constraints. Any further
+ * limits which depend upon the configuration can only be checked in
+ * @mode_fixup or @atomic_check.
+ *
+ * RETURNS:
+ *
+ * drm_mode_status Enum
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode);
+
+ /**
+ * @mode_fixup:
+ *
+ * This callback is used to validate and adjust a mode. The parameter
+ * mode is the display mode that should be fed to the next element in
+ * the display chain, either the final &drm_connector or a &drm_bridge.
+ * The parameter adjusted_mode is the input mode the encoder requires. It
+ * can be modified by this callback and does not need to match mode. See
+ * also &drm_crtc_state.adjusted_mode for more details.
+ *
+ * This function is used by both legacy CRTC helpers and atomic helpers.
+ * This hook is optional.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of atomic modesets, which
+ * can be aborted for any reason (including on userspace's request to
+ * just check whether a configuration would be possible). 
Atomic drivers
+ * MUST NOT touch any persistent state (hardware or software) or data
+ * structures except the passed in adjusted_mode parameter.
+ *
+ * This is in contrast to the legacy CRTC helpers where this was
+ * allowed.
+ *
+ * Atomic drivers which need to inspect and adjust more state should
+ * instead use the @atomic_check callback. If @atomic_check is used,
+ * this hook isn't called since @atomic_check allows a strict superset
+ * of the functionality of @mode_fixup.
+ *
+ * Also beware that userspace can request its own custom modes, neither
+ * core nor helpers filter modes to the list of probe modes reported by
+ * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure
+ * that modes are filtered consistently put any encoder constraints and
+ * limits checks into @mode_valid.
+ *
+ * RETURNS:
+ *
+ * True if an acceptable configuration is possible, false if the modeset
+ * operation should be rejected.
+ */
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @prepare:
+ *
+ * This callback should prepare the encoder for a subsequent modeset,
+ * which in practice means the driver should disable the encoder if it
+ * is running. Most drivers ended up implementing this by calling their
+ * @dpms hook with DRM_MODE_DPMS_OFF.
+ *
+ * This callback is used by the legacy CRTC helpers. Atomic helpers
+ * also support using this hook for disabling an encoder to facilitate
+ * transitions to atomic, but it is deprecated. Instead @disable should
+ * be used.
+ */
+ void (*prepare)(struct drm_encoder *encoder);
+
+ /**
+ * @commit:
+ *
+ * This callback should commit the new mode on the encoder after a modeset,
+ * which in practice means the driver should enable the encoder. Most
+ * drivers ended up implementing this by calling their @dpms hook with
+ * DRM_MODE_DPMS_ON.
+ *
+ * This callback is used by the legacy CRTC helpers. Atomic helpers
+ * also support using this hook for enabling an encoder to facilitate
+ * transitions to atomic, but it is deprecated. Instead @enable should
+ * be used.
+ */
+ void (*commit)(struct drm_encoder *encoder);
+
+ /**
+ * @mode_set:
+ *
+ * This callback is used to update the display mode of an encoder.
+ *
+ * Note that the display pipe is completely off when this function is
+ * called. Drivers which need hardware to be running before they program
+ * the new display mode (because they implement runtime PM) should not
+ * use this hook, because the helper library calls it only once and not
+ * every time the display pipeline is suspended using either DPMS or the
+ * new "ACTIVE" property. Such drivers should instead move all their
+ * encoder setup into the @enable callback.
+ *
+ * This callback is used both by the legacy CRTC helpers and the atomic
+ * modeset helpers. It is optional in the atomic helpers.
+ *
+ * NOTE:
+ *
+ * If the driver uses the atomic modeset helpers and needs to inspect
+ * the connector state or connector display info during mode setting,
+ * @atomic_mode_set can be used instead.
+ */
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /**
+ * @atomic_mode_set:
+ *
+ * This callback is used to update the display mode of an encoder.
+ *
+ * Note that the display pipe is completely off when this function is
+ * called. 
Drivers which need hardware to be running before they program + * the new display mode (because they implement runtime PM) should not + * use this hook, because the helper library calls it only once and not + * every time the display pipeline is suspended using either DPMS or the + * new "ACTIVE" property. Such drivers should instead move all their + * encoder setup into the @enable callback. + * + * This callback is used by the atomic modeset helpers in place of the + * @mode_set callback, if set by the driver. It is optional and should + * be used instead of @mode_set if the driver needs to inspect the + * connector state or display info, since there is no direct way to + * go from the encoder to the current connector. + */ + void (*atomic_mode_set)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + + /** + * @detect: + * + * This callback can be used by drivers who want to do detection on the + * encoder object instead of in connector functions. + * + * It is not used by any helper and therefore has purely driver-specific + * semantics. New drivers shouldn't use this and instead just implement + * their own private callbacks. + * + * FIXME: + * + * This should just be converted into a pile of driver vfuncs. + * Currently radeon, amdgpu and nouveau are using it. + */ + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + + /** + * @atomic_disable: + * + * This callback should be used to disable the encoder. With the atomic + * drivers it is called before this encoder's CRTC has been shut off + * using their own &drm_crtc_helper_funcs.atomic_disable hook. If that + * sequence is too simple drivers can just add their own driver private + * encoder hooks and call them from CRTC's callback by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This callback is a variant of @disable that provides the atomic state + * to the driver. If @atomic_disable is implemented, @disable is not + * called by the helpers. + * + * This hook is only used by atomic helpers. Atomic drivers don't need + * to implement it if there's no need to disable anything at the encoder + * level. To ensure that runtime PM handling (using either DPMS or the + * new "ACTIVE" property) works @atomic_disable must be the inverse of + * @atomic_enable. + */ + void (*atomic_disable)(struct drm_encoder *encoder, + struct drm_atomic_state *state); + + /** + * @atomic_enable: + * + * This callback should be used to enable the encoder. It is called + * after this encoder's CRTC has been enabled using their own + * &drm_crtc_helper_funcs.atomic_enable hook. If that sequence is + * too simple drivers can just add their own driver private encoder + * hooks and call them from CRTC's callback by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). + * + * This callback is a variant of @enable that provides the atomic state + * to the driver. If @atomic_enable is implemented, @enable is not + * called by the helpers. + * + * This hook is only used by atomic helpers, it is the opposite of + * @atomic_disable. Atomic drivers don't need to implement it if there's + * no need to enable anything at the encoder level. To ensure that + * runtime PM handling works @atomic_enable must be the inverse of + * @atomic_disable. 
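+ *
+ * A minimal sketch, with all foo_* helpers made up:
+ *
+ * .. code-block:: c
+ *
+ *     static void foo_encoder_atomic_enable(struct drm_encoder *encoder,
+ *                                           struct drm_atomic_state *state)
+ *     {
+ *             struct drm_connector *connector =
+ *                     drm_atomic_get_new_connector_for_encoder(state, encoder);
+ *
+ *             // power up the PHY first, then start the link
+ *             foo_phy_power_on(encoder);
+ *             foo_link_start(encoder, connector);
+ *     }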
+ */
+ void (*atomic_enable)(struct drm_encoder *encoder,
+ struct drm_atomic_state *state);
+
+ /**
+ * @disable:
+ *
+ * This callback should be used to disable the encoder. With the atomic
+ * drivers it is called before this encoder's CRTC has been shut off
+ * using their own &drm_crtc_helper_funcs.disable hook. If that
+ * sequence is too simple drivers can just add their own driver private
+ * encoder hooks and call them from CRTC's callback by looping over all
+ * encoders connected to it using for_each_encoder_on_crtc().
+ *
+ * This hook is used both by legacy CRTC helpers and atomic helpers.
+ * Atomic drivers don't need to implement it if there's no need to
+ * disable anything at the encoder level. To ensure that runtime PM
+ * handling (using either DPMS or the new "ACTIVE" property) works
+ * @disable must be the inverse of @enable for atomic drivers.
+ *
+ * For atomic drivers also consider @atomic_disable and save yourself
+ * from having to read the NOTE below!
+ *
+ * NOTE:
+ *
+ * With legacy CRTC helpers there's a big semantic difference between
+ * @disable and other hooks (like @prepare or @dpms) used to shut down an
+ * encoder: @disable is only called when also logically disabling the
+ * display pipeline and needs to release any resources acquired in
+ * @mode_set (like shared PLLs, or again release pinned framebuffers).
+ *
+ * Therefore @disable must be the inverse of @mode_set plus @commit for
+ * drivers still using legacy CRTC helpers, which is different from the
+ * rules under atomic.
+ */
+ void (*disable)(struct drm_encoder *encoder);
+
+ /**
+ * @enable:
+ *
+ * This callback should be used to enable the encoder. With the atomic
+ * drivers it is called after this encoder's CRTC has been enabled using
+ * their own &drm_crtc_helper_funcs.enable hook. If that sequence is
+ * too simple drivers can just add their own driver private encoder
+ * hooks and call them from CRTC's callback by looping over all encoders
+ * connected to it using for_each_encoder_on_crtc().
+ *
+ * This hook is only used by atomic helpers; it is the opposite of
+ * @disable. Atomic drivers don't need to implement it if there's no
+ * need to enable anything at the encoder level. To ensure that
+ * runtime PM handling (using either DPMS or the new "ACTIVE" property)
+ * works @enable must be the inverse of @disable for atomic drivers.
+ */
+ void (*enable)(struct drm_encoder *encoder);
+
+ /**
+ * @atomic_check:
+ *
+ * This callback is used to validate encoder state for atomic drivers.
+ * Since the encoder is the object connecting the CRTC and connector it
+ * gets passed both states, to be able to validate interactions and
+ * update the CRTC to match what the encoder needs for the requested
+ * connector.
+ *
+ * Since this provides a strict superset of the functionality of
+ * @mode_fixup (the requested and adjusted modes are both available
+ * through the passed in &struct drm_crtc_state) @mode_fixup is not
+ * called when @atomic_check is implemented.
+ *
+ * This function is used by the atomic helpers, but it is optional.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of an atomic update. The
+ * driver is not allowed to change anything outside of the free-standing
+ * state objects passed-in or assembled in the overall &drm_atomic_state
+ * update tracking structure. 
+ * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any encoder constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); +}; + +/** + * drm_encoder_helper_add - sets the helper vtable for an encoder + * @encoder: DRM encoder + * @funcs: helper vtable to set for @encoder + */ +static inline void drm_encoder_helper_add(struct drm_encoder *encoder, + const struct drm_encoder_helper_funcs *funcs) +{ + encoder->helper_private = funcs; +} + +/** + * struct drm_connector_helper_funcs - helper operations for connectors + * + * These functions are used by the atomic and legacy modeset helpers and by the + * probe helpers. + */ +struct drm_connector_helper_funcs { + /** + * @get_modes: + * + * This function should fill in all modes currently valid for the sink + * into the &drm_connector.probed_modes list. It should also update the + * EDID property by calling drm_connector_update_edid_property(). + * + * The usual way to implement this is to cache the EDID retrieved in the + * probe callback somewhere in the driver-private connector structure. + * In this function drivers then parse the modes in the EDID and add + * them by calling drm_add_edid_modes(). But connectors that drive a + * fixed panel can also manually add specific modes using + * drm_mode_probed_add(). Drivers which manually add modes should also + * make sure that the &drm_connector.display_info, + * &drm_connector.width_mm and &drm_connector.height_mm fields are + * filled in. + * + * Note that the caller function will automatically add standard VESA + * DMT modes up to 1024x768 if the .get_modes() helper operation returns + * no mode and if the connector status is connector_status_connected or + * connector_status_unknown. There is no need to call + * drm_add_modes_noedid() manually in that case. + * + * Virtual drivers that just want some standard VESA mode with a given + * resolution can call drm_add_modes_noedid(), and mark the preferred + * one using drm_set_preferred_mode(). + * + * This function is only called after the @detect hook has indicated + * that a sink is connected and when the EDID isn't overridden through + * sysfs or the kernel commandline. + * + * This callback is used by the probe helpers in e.g. + * drm_helper_probe_single_connector_modes(). + * + * To avoid races with concurrent connector state updates, the helper + * libraries always call this with the &drm_mode_config.connection_mutex + * held. Because of this it's safe to inspect &drm_connector->state. + * + * RETURNS: + * + * The number of modes added by calling drm_mode_probed_add(). + */ + int (*get_modes)(struct drm_connector *connector); + + /** + * @detect_ctx: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. 
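+ *
+ * For instance, hardware with a hot-plug detect status register
+ * (all foo_*/FOO_* names below are made up) might implement this as:
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_connector_detect_ctx(struct drm_connector *connector,
+ *                                         struct drm_modeset_acquire_ctx *ctx,
+ *                                         bool force)
+ *     {
+ *             struct foo_device *foo = to_foo_device(connector->dev);
+ *
+ *             if (readl(foo->mmio + FOO_HPD_STATUS) & FOO_HPD_CONNECTED)
+ *                     return connector_status_connected;
+ *
+ *             return connector_status_disconnected;
+ *     }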
+ * + * This callback is optional, if not implemented the connector will be + * considered as always being attached. + * + * This is the atomic version of &drm_connector_funcs.detect. + * + * To avoid races against concurrent connector state updates, the + * helper libraries always call this with ctx set to a valid context, + * and &drm_mode_config.connection_mutex will always be locked with + * the ctx parameter set to this ctx. This allows taking additional + * locks as required. + * + * RETURNS: + * + * &drm_connector_status indicating the connector's status, + * or the error code returned by drm_modeset_lock(), -EDEADLK. + */ + int (*detect_ctx)(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force); + + /** + * @mode_valid: + * + * Callback to validate a mode for a connector, irrespective of the + * specific display configuration. + * + * This callback is used by the probe helpers to filter the mode list + * (which is usually derived from the EDID data block from the sink). + * See e.g. drm_helper_probe_single_connector_modes(). + * + * This function is optional. + * + * NOTE: + * + * This only filters the mode list supplied to userspace in the + * GETCONNECTOR IOCTL. Compared to &drm_encoder_helper_funcs.mode_valid, + * &drm_crtc_helper_funcs.mode_valid and &drm_bridge_funcs.mode_valid, + * which are also called by the atomic helpers from + * drm_atomic_helper_check_modeset(). This allows userspace to force and + * ignore sink constraint (like the pixel clock limits in the screen's + * EDID), which is useful for e.g. testing, or working around a broken + * EDID. Any source hardware constraint (which always need to be + * enforced) therefore should be checked in one of the above callbacks, + * and not this one here. + * + * To avoid races with concurrent connector state updates, the helper + * libraries always call this with the &drm_mode_config.connection_mutex + * held. Because of this it's safe to inspect &drm_connector->state. + * + * RETURNS: + * + * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum + * drm_mode_status. + */ + enum drm_mode_status (*mode_valid)(struct drm_connector *connector, + struct drm_display_mode *mode); + + /** + * @mode_valid_ctx: + * + * Callback to validate a mode for a connector, irrespective of the + * specific display configuration. + * + * This callback is used by the probe helpers to filter the mode list + * (which is usually derived from the EDID data block from the sink). + * See e.g. drm_helper_probe_single_connector_modes(). + * + * This function is optional, and is the atomic version of + * &drm_connector_helper_funcs.mode_valid. + * + * To allow for accessing the atomic state of modesetting objects, the + * helper libraries always call this with ctx set to a valid context, + * and &drm_mode_config.connection_mutex will always be locked with + * the ctx parameter set to @ctx. This allows for taking additional + * locks as required. + * + * Even though additional locks may be acquired, this callback is + * still expected not to take any constraints into account which would + * be influenced by the currently set display state - such constraints + * should be handled in the driver's atomic check. For example, if a + * connector shares display bandwidth with other connectors then it + * would be ok to validate the minimum bandwidth requirement of a mode + * against the maximum possible bandwidth of the connector. 
But it + * wouldn't be ok to take the current bandwidth usage of other + * connectors into account, as this would change depending on the + * display state. + * + * Returns: + * 0 if &drm_connector_helper_funcs.mode_valid_ctx succeeded and wrote + * the &enum drm_mode_status value to @status, or a negative error + * code otherwise. + * + */ + int (*mode_valid_ctx)(struct drm_connector *connector, + struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx, + enum drm_mode_status *status); + + /** + * @best_encoder: + * + * This function should select the best encoder for the given connector. + * + * This function is used by both the atomic helpers (in the + * drm_atomic_helper_check_modeset() function) and in the legacy CRTC + * helpers. + * + * NOTE: + * + * In atomic drivers this function is called in the check phase of an + * atomic update. The driver is not allowed to change or inspect + * anything outside of arguments passed-in. Atomic drivers which need to + * inspect dynamic configuration state should instead use + * @atomic_best_encoder. + * + * You can leave this function to NULL if the connector is only + * attached to a single encoder. In this case, the core will call + * drm_connector_get_single_encoder() for you. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. + */ + struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + + /** + * @atomic_best_encoder: + * + * This is the atomic version of @best_encoder for atomic drivers which + * need to select the best encoder depending upon the desired + * configuration and can't select it statically. + * + * This function is used by drm_atomic_helper_check_modeset(). + * If it is not implemented, the core will fallback to @best_encoder + * (or drm_connector_get_single_encoder() if @best_encoder is NULL). + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the + * &drm_atomic_state update tracking structure passed in. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. + */ + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_atomic_state *state); + + /** + * @atomic_check: + * + * This hook is used to validate connector state. This function is + * called from &drm_atomic_helper_check_modeset, and is called when + * a connector property is set, or a modeset on the crtc is forced. + * + * Because &drm_atomic_helper_check_modeset may be called multiple times, + * this function should handle being called multiple times as well. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. 
The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_connector *connector, + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This hook is to be used by drivers implementing writeback connectors + * that need a point when to commit the writeback job to the hardware. + * The writeback_job to commit is available in the new connector state, + * in &drm_connector_state.writeback_job. + * + * This hook is optional. + * + * This callback is used by the atomic modeset helpers. + */ + void (*atomic_commit)(struct drm_connector *connector, + struct drm_atomic_state *state); + + /** + * @prepare_writeback_job: + * + * As writeback jobs contain a framebuffer, drivers may need to + * prepare and clean them up the same way they can prepare and + * clean up framebuffers for planes. This optional connector operation + * is used to support the preparation of writeback jobs. The job + * prepare operation is called from drm_atomic_helper_prepare_planes() + * for struct &drm_writeback_connector connectors only. + * + * This operation is optional. + * + * This callback is used by the atomic modeset helpers. + */ + int (*prepare_writeback_job)(struct drm_writeback_connector *connector, + struct drm_writeback_job *job); + /** + * @cleanup_writeback_job: + * + * This optional connector operation is used to support the + * cleanup of writeback jobs. The job cleanup operation is called + * from the existing drm_writeback_cleanup_job() function, invoked + * both when destroying the job as part of an aborted commit, or when + * the job completes. + * + * This operation is optional. + * + * This callback is used by the atomic modeset helpers. + */ + void (*cleanup_writeback_job)(struct drm_writeback_connector *connector, + struct drm_writeback_job *job); +}; + +/** + * drm_connector_helper_add - sets the helper vtable for a connector + * @connector: DRM connector + * @funcs: helper vtable to set for @connector + */ +static inline void drm_connector_helper_add(struct drm_connector *connector, + const struct drm_connector_helper_funcs *funcs) +{ + connector->helper_private = funcs; +} + +/** + * struct drm_plane_helper_funcs - helper operations for planes + * + * These functions are used by the atomic helpers and by the transitional plane + * helpers. + */ +struct drm_plane_helper_funcs { + /** + * @prepare_fb: + * + * This hook is to prepare a framebuffer for scanout by e.g. pinning + * its backing storage or relocating it into a contiguous block of + * VRAM. Other possible preparatory work includes flushing caches. + * + * This function must not block for outstanding rendering, since it is + * called in the context of the atomic IOCTL even for async commits to + * be able to return any errors to userspace. Instead the recommended + * way is to fill out the &drm_plane_state.fence of the passed-in + * &drm_plane_state. If the driver doesn't support native fences then + * equivalent functionality should be implemented through private + * members in the plane structure. 
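+ *
+ * A rough sketch for a GEM-based driver that needs extra,
+ * driver-specific pinning on top of the common fence handling
+ * described below (foo_pin_fb() is made up):
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_plane_prepare_fb(struct drm_plane *plane,
+ *                                     struct drm_plane_state *new_state)
+ *     {
+ *             int ret;
+ *
+ *             if (!new_state->fb)
+ *                     return 0;
+ *
+ *             ret = foo_pin_fb(new_state->fb);
+ *             if (ret)
+ *                     return ret;
+ *
+ *             // fills out new_state->fence from the GEM objects
+ *             return drm_gem_plane_helper_prepare_fb(plane, new_state);
+ *     }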
+ * + * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook + * set drm_gem_plane_helper_prepare_fb() is called automatically to + * implement this. Other drivers which need additional plane processing + * can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb + * hook. + * + * The helpers will call @cleanup_fb with matching arguments for every + * successful call to this hook. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * RETURNS: + * + * 0 on success or one of the following negative error codes allowed by + * the &drm_mode_config_funcs.atomic_commit vfunc. When using helpers + * this callback is the only one which can fail an atomic commit, + * everything else must complete successfully. + */ + int (*prepare_fb)(struct drm_plane *plane, + struct drm_plane_state *new_state); + /** + * @cleanup_fb: + * + * This hook is called to clean up any resources allocated for the given + * framebuffer and plane configuration in @prepare_fb. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*cleanup_fb)(struct drm_plane *plane, + struct drm_plane_state *old_state); + + /** + * @atomic_check: + * + * Drivers should check plane specific constraints in this hook. + * + * When using drm_atomic_helper_check_planes() plane's @atomic_check + * hooks are called before the ones for CRTCs, which allows drivers to + * request shared resources that the CRTC controls here. For more + * complicated dependencies the driver can call the provided check helpers + * multiple times until the computed state has a final configuration and + * everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the + * &drm_atomic_state update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_plane *plane, + struct drm_atomic_state *state); + + /** + * @atomic_update: + * + * Drivers should use this function to update the plane state. This + * hook is called in-between the &drm_crtc_helper_funcs.atomic_begin and + * drm_crtc_helper_funcs.atomic_flush callbacks. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. 
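+ *
+ * A minimal sketch (the foo_* register helpers are made up):
+ *
+ * .. code-block:: c
+ *
+ *     static void foo_plane_atomic_update(struct drm_plane *plane,
+ *                                         struct drm_atomic_state *state)
+ *     {
+ *             struct drm_plane_state *new_state =
+ *                     drm_atomic_get_new_plane_state(state, plane);
+ *
+ *             // program scanout address and position from the new state
+ *             foo_plane_set_fb(plane, new_state->fb);
+ *             foo_plane_set_pos(plane, new_state->crtc_x, new_state->crtc_y);
+ *     }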
+ */
+ void (*atomic_update)(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+ /**
+ * @atomic_disable:
+ *
+ * Drivers should use this function to unconditionally disable a plane.
+ * This hook is called in-between the
+ * &drm_crtc_helper_funcs.atomic_begin and
+ * drm_crtc_helper_funcs.atomic_flush callbacks. It is an alternative to
+ * @atomic_update, which will be called for disabling planes, too, if
+ * the @atomic_disable hook isn't implemented.
+ *
+ * This hook is also useful to disable planes in preparation for a modeset,
+ * by calling drm_atomic_helper_disable_planes_on_crtc() from the
+ * &drm_crtc_helper_funcs.disable hook.
+ *
+ * Note that the power state of the display pipe when this function is
+ * called depends upon the exact helpers and calling sequence the driver
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
+ *
+ * This callback is used by the atomic modeset helpers and by the
+ * transitional plane helpers, but it is optional.
+ */
+ void (*atomic_disable)(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_async_check:
+ *
+ * Drivers should set this function pointer to check if the plane's
+ * atomic state can be updated in an async fashion. Here async means
+ * "not vblank synchronized".
+ *
+ * This hook is called by drm_atomic_async_check() to establish if a
+ * given update can be committed asynchronously, that is, if it can
+ * jump ahead of the state currently queued for update.
+ *
+ * RETURNS:
+ *
+ * Return 0 on success and any error returned indicates that the update
+ * cannot be applied in an asynchronous manner.
+ */
+ int (*atomic_async_check)(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_async_update:
+ *
+ * Drivers should set this function pointer to perform asynchronous
+ * updates of planes, that is, jump ahead of the currently queued
+ * state and update the plane. Here async means "not vblank
+ * synchronized".
+ *
+ * This hook is called by drm_atomic_helper_async_commit().
+ *
+ * An async update will happen on legacy cursor updates. An async
+ * update won't happen if there is an outstanding commit modifying
+ * the same plane.
+ *
+ * When doing async_update drivers shouldn't replace the
+ * &drm_plane_state but update the current one with the new plane
+ * configurations in the new plane_state.
+ *
+ * Drivers should also swap the framebuffers between current plane
+ * state (&drm_plane.state) and new_state.
+ * This is required since cleanup for async commits is performed on
+ * the new state, rather than old state like for traditional commits.
+ * Since we want to give up the reference on the current (old) fb
+ * instead of our brand new one, swap them in the driver during the
+ * async commit.
+ *
+ * FIXME:
+ * - It only works for single plane updates
+ * - Async Pageflips are not supported yet
+ * - Some hw might still scan out the old buffer until the next
+ * vblank, however we let go of the fb references as soon as
+ * we run this hook. For now drivers must implement their own workers
+ * for deferring if needed, until a common solution is created. 
+ */
+ void (*atomic_async_update)(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+};
+
+/**
+ * drm_plane_helper_add - sets the helper vtable for a plane
+ * @plane: DRM plane
+ * @funcs: helper vtable to set for @plane
+ */
+static inline void drm_plane_helper_add(struct drm_plane *plane,
+ const struct drm_plane_helper_funcs *funcs)
+{
+ plane->helper_private = funcs;
+}
+
+/**
+ * struct drm_mode_config_helper_funcs - global modeset helper operations
+ *
+ * These helper functions are used by the atomic helpers.
+ */
+struct drm_mode_config_helper_funcs {
+ /**
+ * @atomic_commit_tail:
+ *
+ * This hook is used by the default atomic_commit() hook implemented in
+ * drm_atomic_helper_commit() together with the nonblocking commit
+ * helpers (see drm_atomic_helper_setup_commit() for a starting point)
+ * to implement blocking and nonblocking commits easily. It is not used
+ * by the atomic helpers.
+ *
+ * This function is called when the new atomic state has already been
+ * swapped into the various state pointers. The passed in state
+ * therefore contains copies of the old/previous state. This hook should
+ * commit the new state into hardware. Note that the helpers have
+ * already waited for preceding atomic commits and fences, but drivers
+ * can add more waiting calls at the start of their implementation, e.g.
+ * to wait for driver-internal requests for implicit syncing, before
+ * starting to commit the update to the hardware.
+ *
+ * After the atomic update is committed to the hardware this hook needs
+ * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
+ * to be executed by the hardware, for example using
+ * drm_atomic_helper_wait_for_vblanks() or
+ * drm_atomic_helper_wait_for_flip_done(), and then clean up the old
+ * framebuffers using drm_atomic_helper_cleanup_planes().
+ *
+ * When disabling a CRTC this hook _must_ stall for the commit to
+ * complete. Vblank waits don't work on disabled CRTC, hence the core
+ * can't take care of this. And it also can't rely on the vblank event,
+ * since that can be signalled already when the screen shows black,
+ * which can happen much earlier than the last hardware access needed to
+ * shut off the display pipeline completely.
+ *
+ * This hook is optional, the default implementation is
+ * drm_atomic_helper_commit_tail().
+ */
+ void (*atomic_commit_tail)(struct drm_atomic_state *state);
+
+ /**
+ * @atomic_commit_setup:
+ *
+ * This hook is used by the default atomic_commit() hook implemented in
+ * drm_atomic_helper_commit() together with the nonblocking helpers (see
+ * drm_atomic_helper_setup_commit()) to extend the DRM commit setup. It
+ * is not used by the atomic helpers.
+ *
+ * This function is called at the end of
+ * drm_atomic_helper_setup_commit(), so once the commit has been
+ * properly set up across the generic DRM object states. It allows
+ * drivers to do some additional commit tracking that isn't related to a
+ * CRTC, plane or connector, tracked in a &drm_private_obj structure.
+ *
+ * Note that the documentation of &drm_private_obj has more details on
+ * how one should implement this.
+ *
+ * This hook is optional. 
+ */
+ int (*atomic_commit_setup)(struct drm_atomic_state *state);
+};
+
+#endif
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
new file mode 100644
index 000000000..ec4f543c3
--- /dev/null
+++ b/include/drm/drm_modeset_lock.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_MODESET_LOCK_H_
+#define DRM_MODESET_LOCK_H_
+
+#include <linux/types.h> /* stackdepot.h is not self-contained */
+#include <linux/stackdepot.h>
+#include <linux/ww_mutex.h>
+
+struct drm_modeset_lock;
+
+/**
+ * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
+ * @ww_ctx: base acquire ctx
+ * @contended: used internally for -EDEADLK handling
+ * @stack_depot: used internally for contention debugging
+ * @locked: list of held locks
+ * @trylock_only: trylock mode used in atomic contexts/panic notifiers
+ * @interruptible: whether interruptible locking should be used.
+ *
+ * Each thread competing for a set of locks must use one acquire
+ * ctx. And if any lock function returns -EDEADLK, it must back off and
+ * retry.
+ */
+struct drm_modeset_acquire_ctx {
+
+ struct ww_acquire_ctx ww_ctx;
+
+ /*
+ * Contended lock: if a lock is contended you should only call
+ * drm_modeset_backoff() which drops locks and slow-locks the
+ * contended lock.
+ */
+ struct drm_modeset_lock *contended;
+
+ /*
+ * Stack depot for debugging when a contended lock was not backed off
+ * from.
+ */
+ depot_stack_handle_t stack_depot;
+
+ /*
+ * list of held locks (drm_modeset_lock)
+ */
+ struct list_head locked;
+
+ /*
+ * Trylock mode, use only for panic handlers!
+ */
+ bool trylock_only;
+
+ /* Perform interruptible waits on this context. */
+ bool interruptible;
+};
+
+/**
+ * struct drm_modeset_lock - used for locking modeset resources.
+ * @mutex: resource locking
+ * @head: used to hold its place on &drm_atomic_state.locked list when
+ * part of an atomic update
+ *
+ * Used for locking CRTCs and other modeset resources.
+ */
+struct drm_modeset_lock {
+ /*
+ * modeset lock
+ */
+ struct ww_mutex mutex;
+
+ /*
+ * Resources that are locked as part of an atomic update are added
+ * to a list (so we know what to unlock at the end). 
+ */ + struct list_head head; +}; + +#define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0) + +void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx, + uint32_t flags); +void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx); +void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx); +int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx); + +void drm_modeset_lock_init(struct drm_modeset_lock *lock); + +/** + * drm_modeset_lock_fini - cleanup lock + * @lock: lock to cleanup + */ +static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock) +{ + WARN_ON(!list_empty(&lock->head)); +} + +/** + * drm_modeset_is_locked - equivalent to mutex_is_locked() + * @lock: lock to check + */ +static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock) +{ + return ww_mutex_is_locked(&lock->mutex); +} + +/** + * drm_modeset_lock_assert_held - equivalent to lockdep_assert_held() + * @lock: lock to check + */ +static inline void drm_modeset_lock_assert_held(struct drm_modeset_lock *lock) +{ + lockdep_assert_held(&lock->mutex.base); +} + +int drm_modeset_lock(struct drm_modeset_lock *lock, + struct drm_modeset_acquire_ctx *ctx); +int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock); +void drm_modeset_unlock(struct drm_modeset_lock *lock); + +struct drm_device; +struct drm_crtc; +struct drm_plane; + +void drm_modeset_lock_all(struct drm_device *dev); +void drm_modeset_unlock_all(struct drm_device *dev); +void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); + +int drm_modeset_lock_all_ctx(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); + +/** + * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks + * @dev: drm device + * @ctx: local modeset acquire context, will be dereferenced + * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init() + * @ret: local ret/err/etc variable to track error status + * + * Use these macros to simplify grabbing all modeset locks using a local + * context. This has the advantage of reducing boilerplate, but also properly + * checking return values where appropriate. + * + * Any code run between BEGIN and END will be holding the modeset locks. + * + * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and + * forth between the labels on deadlock and error conditions. + * + * Drivers can acquire additional modeset locks. If any lock acquisition + * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with + * the @ret parameter containing the return value of drm_modeset_lock(). + * + * Returns: + * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN() + * is 0, so no error checking is necessary + */ +#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_lock(&dev->mode_config.mutex); \ + drm_modeset_acquire_init(&ctx, flags); \ +modeset_lock_retry: \ + ret = drm_modeset_lock_all_ctx(dev, &ctx); \ + if (ret) \ + goto modeset_lock_fail; + +/** + * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks + * @dev: drm device + * @ctx: local modeset acquire context, will be dereferenced + * @ret: local ret/err/etc variable to track error status + * + * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN + * if ret is -EDEADLK. + * + * It's important that you use the same ret variable for begin and end so + * deadlock conditions are properly handled. 
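+ *
+ * A minimal usage sketch (the work done while holding the locks is, of
+ * course, driver-specific):
+ *
+ * .. code-block:: c
+ *
+ *     struct drm_modeset_acquire_ctx ctx;
+ *     int ret;
+ *
+ *     DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
+ *
+ *     // all modeset locks of dev are held here
+ *
+ *     DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
+ *     if (ret)
+ *             return ret;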
+ * + * Returns: + * ret will be untouched unless it is -EDEADLK on entry. That means that if you + * successfully acquire the locks, ret will be whatever your code sets it to. If + * there is a deadlock or other failure with acquire or backoff, ret will be set + * to that failure. In both of these cases the code between BEGIN/END will not + * be run, so the failure will reflect the inability to grab the locks. + */ +#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \ +modeset_lock_fail: \ + if (ret == -EDEADLK) { \ + ret = drm_modeset_backoff(&ctx); \ + if (!ret) \ + goto modeset_lock_retry; \ + } \ + drm_modeset_drop_locks(&ctx); \ + drm_modeset_acquire_fini(&ctx); \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_unlock(&dev->mode_config.mutex); + +#endif /* DRM_MODESET_LOCK_H_ */ diff --git a/include/drm/drm_module.h b/include/drm/drm_module.h new file mode 100644 index 000000000..4db1ae03d --- /dev/null +++ b/include/drm/drm_module.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_MODULE_H +#define DRM_MODULE_H + +#include <linux/pci.h> +#include <linux/platform_device.h> + +#include <drm/drm_drv.h> + +/** + * DOC: overview + * + * This library provides helpers registering DRM drivers during module + * initialization and shutdown. The provided helpers act like bus-specific + * module helpers, such as module_pci_driver(), but respect additional + * parameters that control DRM driver registration. + * + * Below is an example of initializing a DRM driver for a device on the + * PCI bus. + * + * .. code-block:: c + * + * struct pci_driver my_pci_drv = { + * }; + * + * drm_module_pci_driver(my_pci_drv); + * + * The generated code will test if DRM drivers are enabled and register + * the PCI driver my_pci_drv. For more complex module initialization, you + * can still use module_init() and module_exit() in your driver. + */ + +/* + * PCI drivers + */ + +static inline int __init drm_pci_register_driver(struct pci_driver *pci_drv) +{ + if (drm_firmware_drivers_only()) + return -ENODEV; + + return pci_register_driver(pci_drv); +} + +/** + * drm_module_pci_driver - Register a DRM driver for PCI-based devices + * @__pci_drv: the PCI driver structure + * + * Registers a DRM driver for devices on the PCI bus. The helper + * macro behaves like module_pci_driver() but tests the state of + * drm_firmware_drivers_only(). For more complex module initialization, + * use module_init() and module_exit() directly. + * + * Each module may only use this macro once. Calling it replaces + * module_init() and module_exit(). + */ +#define drm_module_pci_driver(__pci_drv) \ + module_driver(__pci_drv, drm_pci_register_driver, pci_unregister_driver) + +static inline int __init +drm_pci_register_driver_if_modeset(struct pci_driver *pci_drv, int modeset) +{ + if (drm_firmware_drivers_only() && modeset == -1) + return -ENODEV; + if (modeset == 0) + return -ENODEV; + + return pci_register_driver(pci_drv); +} + +static inline void __exit +drm_pci_unregister_driver_if_modeset(struct pci_driver *pci_drv, int modeset) +{ + pci_unregister_driver(pci_drv); +} + +/** + * drm_module_pci_driver_if_modeset - Register a DRM driver for PCI-based devices + * @__pci_drv: the PCI driver structure + * @__modeset: an additional parameter that disables the driver + * + * This macro is deprecated and only provided for existing drivers. For + * new drivers, use drm_module_pci_driver(). + * + * Registers a DRM driver for devices on the PCI bus. 
The helper macro + * behaves like drm_module_pci_driver() with an additional driver-specific + * flag. If __modeset is 0, the driver has been disabled, if __modeset is + * -1 the driver state depends on the global DRM state. For all other + * values, the PCI driver has been enabled. The default should be -1. + */ +#define drm_module_pci_driver_if_modeset(__pci_drv, __modeset) \ + module_driver(__pci_drv, drm_pci_register_driver_if_modeset, \ + drm_pci_unregister_driver_if_modeset, __modeset) + +/* + * Platform drivers + */ + +static inline int __init +drm_platform_driver_register(struct platform_driver *platform_drv) +{ + if (drm_firmware_drivers_only()) + return -ENODEV; + + return platform_driver_register(platform_drv); +} + +/** + * drm_module_platform_driver - Register a DRM driver for platform devices + * @__platform_drv: the platform driver structure + * + * Registers a DRM driver for devices on the platform bus. The helper + * macro behaves like module_platform_driver() but tests the state of + * drm_firmware_drivers_only(). For more complex module initialization, + * use module_init() and module_exit() directly. + * + * Each module may only use this macro once. Calling it replaces + * module_init() and module_exit(). + */ +#define drm_module_platform_driver(__platform_drv) \ + module_driver(__platform_drv, drm_platform_driver_register, \ + platform_driver_unregister) + +#endif diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h new file mode 100644 index 000000000..10ab58c40 --- /dev/null +++ b/include/drm/drm_of.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_OF_H__ +#define __DRM_OF_H__ + +#include <linux/of_graph.h> +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) +#include <drm/drm_bridge.h> +#endif + +struct component_master_ops; +struct component_match; +struct device; +struct drm_device; +struct drm_encoder; +struct drm_panel; +struct drm_bridge; +struct device_node; + +/** + * enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection + * @DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: Even pixels are expected to be generated + * from the first port, odd pixels from the second port + * @DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: Odd pixels are expected to be generated + * from the first port, even pixels from the second port + */ +enum drm_lvds_dual_link_pixels { + DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 0, + DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 1, +}; + +#ifdef CONFIG_OF +uint32_t drm_of_crtc_port_mask(struct drm_device *dev, + struct device_node *port); +uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, + struct device_node *port); +void drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node); +int drm_of_component_probe(struct device *dev, + int (*compare_of)(struct device *, void *), + const struct component_master_ops *m_ops); +int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint); +int drm_of_find_panel_or_bridge(const struct device_node *np, + int port, int endpoint, + struct drm_panel **panel, + struct drm_bridge **bridge); +int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, + const struct device_node *port2); +int drm_of_lvds_get_data_mapping(const struct device_node *port); +int drm_of_get_data_lanes_count(const struct device_node *endpoint, + const unsigned int min, const unsigned int max); +int 
drm_of_get_data_lanes_count_ep(const struct device_node *port, + int port_reg, int reg, + const unsigned int min, + const unsigned int max); +#else +static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev, + struct device_node *port) +{ + return 0; +} + +static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, + struct device_node *port) +{ + return 0; +} + +static inline void +drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node) +{ +} + +static inline int +drm_of_component_probe(struct device *dev, + int (*compare_of)(struct device *, void *), + const struct component_master_ops *m_ops) +{ + return -EINVAL; +} + +static inline int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint) +{ + return -EINVAL; +} +static inline int drm_of_find_panel_or_bridge(const struct device_node *np, + int port, int endpoint, + struct drm_panel **panel, + struct drm_bridge **bridge) +{ + return -EINVAL; +} + +static inline int +drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, + const struct device_node *port2) +{ + return -EINVAL; +} + +static inline int +drm_of_lvds_get_data_mapping(const struct device_node *port) +{ + return -EINVAL; +} + +static inline int +drm_of_get_data_lanes_count(const struct device_node *endpoint, + const unsigned int min, const unsigned int max) +{ + return -EINVAL; +} + +static inline int +drm_of_get_data_lanes_count_ep(const struct device_node *port, + int port_reg, int reg, + const unsigned int min, + const unsigned int max) +{ + return -EINVAL; +} +#endif + +/* + * drm_of_panel_bridge_remove - remove panel bridge + * @np: device tree node containing panel bridge output ports + * + * Remove the panel bridge of a given DT node's port and endpoint number + * + * Returns zero if successful, or one of the standard error codes if it fails. + */ +static inline int drm_of_panel_bridge_remove(const struct device_node *np, + int port, int endpoint) +{ +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) + struct drm_bridge *bridge; + struct device_node *remote; + + remote = of_graph_get_remote_node(np, port, endpoint); + if (!remote) + return -ENODEV; + + bridge = of_drm_find_bridge(remote); + drm_panel_bridge_remove(bridge); + + return 0; +#else + return -EINVAL; +#endif +} + +static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct of_endpoint endpoint; + int ret = drm_of_encoder_active_endpoint(node, encoder, + &endpoint); + + return ret ?: endpoint.id; +} + +static inline int drm_of_encoder_active_port_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct of_endpoint endpoint; + int ret = drm_of_encoder_active_endpoint(node, encoder, + &endpoint); + + return ret ?: endpoint.port; +} + +#endif /* __DRM_OF_H__ */ diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h new file mode 100644 index 000000000..994bfcdd8 --- /dev/null +++ b/include/drm/drm_panel.h @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2013, NVIDIA Corporation. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_PANEL_H__ +#define __DRM_PANEL_H__ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/list.h> + +struct backlight_device; +struct dentry; +struct device_node; +struct drm_connector; +struct drm_device; +struct drm_panel; +struct display_timing; + +enum drm_panel_orientation; + +/** + * struct drm_panel_funcs - perform operations on a given panel + * + * The .prepare() function is typically called before the display controller + * starts to transmit video data. Panel drivers can use this to turn the panel + * on and wait for it to become ready. If additional configuration is required + * (via a control bus such as I2C, SPI or DSI for example) this is a good time + * to do that. + * + * After the display controller has started transmitting video data, it's safe + * to call the .enable() function. This will typically enable the backlight to + * make the image on screen visible. Some panels require a certain amount of + * time or frames before the image is displayed. This function is responsible + * for taking this into account before enabling the backlight to avoid visual + * glitches. + * + * Before stopping video transmission from the display controller it can be + * necessary to turn off the panel to avoid visual glitches. This is done in + * the .disable() function. Analogously to .enable() this typically involves + * turning off the backlight and waiting for some time to make sure no image + * is visible on the panel. It is then safe for the display controller to + * cease transmission of video data. + * + * To save power when no video data is transmitted, a driver can power down + * the panel. This is the job of the .unprepare() function. + * + * Backlight can be handled automatically if configured using + * drm_panel_of_backlight() or drm_panel_dp_aux_backlight(). Then the driver + * does not need to implement the functionality to enable/disable backlight. + */ +struct drm_panel_funcs { + /** + * @prepare: + * + * Turn on panel and perform set up. + * + * This function is optional. + */ + int (*prepare)(struct drm_panel *panel); + + /** + * @enable: + * + * Enable panel (turn on back light, etc.). + * + * This function is optional. + */ + int (*enable)(struct drm_panel *panel); + + /** + * @disable: + * + * Disable panel (turn off back light, etc.). + * + * This function is optional. 
+ */ + int (*disable)(struct drm_panel *panel); + + /** + * @unprepare: + * + * Turn off panel. + * + * This function is optional. + */ + int (*unprepare)(struct drm_panel *panel); + + /** + * @get_modes: + * + * Add modes to the connector that the panel is attached to + * and returns the number of modes added. + * + * This function is mandatory. + */ + int (*get_modes)(struct drm_panel *panel, + struct drm_connector *connector); + + /** + * @get_orientation: + * + * Return the panel orientation set by device tree or EDID. + * + * This function is optional. + */ + enum drm_panel_orientation (*get_orientation)(struct drm_panel *panel); + + /** + * @get_timings: + * + * Copy display timings into the provided array and return + * the number of display timings available. + * + * This function is optional. + */ + int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, + struct display_timing *timings); + + /** + * @debugfs_init: + * + * Allows panels to create panels-specific debugfs files. + */ + void (*debugfs_init)(struct drm_panel *panel, struct dentry *root); +}; + +/** + * struct drm_panel - DRM panel object + */ +struct drm_panel { + /** + * @dev: + * + * Parent device of the panel. + */ + struct device *dev; + + /** + * @backlight: + * + * Backlight device, used to turn on backlight after the call + * to enable(), and to turn off backlight before the call to + * disable(). + * backlight is set by drm_panel_of_backlight() or + * drm_panel_dp_aux_backlight() and drivers shall not assign it. + */ + struct backlight_device *backlight; + + /** + * @funcs: + * + * Operations that can be performed on the panel. + */ + const struct drm_panel_funcs *funcs; + + /** + * @connector_type: + * + * Type of the panel as a DRM_MODE_CONNECTOR_* value. This is used to + * initialise the drm_connector corresponding to the panel with the + * correct connector type. + */ + int connector_type; + + /** + * @list: + * + * Panel entry in registry. 
+ */ + struct list_head list; +}; + +void drm_panel_init(struct drm_panel *panel, struct device *dev, + const struct drm_panel_funcs *funcs, + int connector_type); + +void drm_panel_add(struct drm_panel *panel); +void drm_panel_remove(struct drm_panel *panel); + +int drm_panel_prepare(struct drm_panel *panel); +int drm_panel_unprepare(struct drm_panel *panel); + +int drm_panel_enable(struct drm_panel *panel); +int drm_panel_disable(struct drm_panel *panel); + +int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector); + +#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) +struct drm_panel *of_drm_find_panel(const struct device_node *np); +int of_drm_get_panel_orientation(const struct device_node *np, + enum drm_panel_orientation *orientation); +#else +static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) +{ + return ERR_PTR(-ENODEV); +} + +static inline int of_drm_get_panel_orientation(const struct device_node *np, + enum drm_panel_orientation *orientation) +{ + return -ENODEV; +} +#endif + +#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ + (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) +int drm_panel_of_backlight(struct drm_panel *panel); +#else +static inline int drm_panel_of_backlight(struct drm_panel *panel) +{ + return 0; +} +#endif + +#endif diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h new file mode 100644 index 000000000..b7e899ce4 --- /dev/null +++ b/include/drm/drm_pciids.h @@ -0,0 +1,814 @@ +/* SPDX-License-Identifier: MIT */ +#define radeon_PCI_IDS \ + {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1318, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ + {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ + {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ + {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ + {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ + {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B48, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ + {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ + {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ + {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ + {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ + {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4d, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6703, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6704, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6705, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6706, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6707, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6708, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6709, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6718, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 
0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, 
\ + {0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + 
{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x684C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c1, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + 
{0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x948A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x955f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C7, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9642, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9805, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 
0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9904, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9905, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9906, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9998, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0, 0, 0} + +#define r128_PCI_IDS \ + {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define mga_PCI_IDS \ + {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ + {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ + {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \ + {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ + {0, 0, 0} + +#define sisdrv_PCI_IDS \ + {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 
0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0, 0, 0} + +#define tdfx_PCI_IDS \ + {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define viadrv_PCI_IDS \ + {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ + {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ + {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ + {0, 0, 0} + +#define i810_PCI_IDS \ + {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define savage_PCI_IDS \ + {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ + {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ + {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ + {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ + {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ + {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ + {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ + {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ + {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ + {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ + {0, 0, 0} diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h new file mode 100644 index 000000000..447e664e4 --- /dev/null +++ b/include/drm/drm_plane.h @@ -0,0 +1,949 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and 
this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_PLANE_H__ +#define __DRM_PLANE_H__ + +#include <linux/list.h> +#include <linux/ctype.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_color_mgmt.h> +#include <drm/drm_rect.h> +#include <drm/drm_modeset_lock.h> +#include <drm/drm_util.h> + +struct drm_crtc; +struct drm_printer; +struct drm_modeset_acquire_ctx; + +enum drm_scaling_filter { + DRM_SCALING_FILTER_DEFAULT, + DRM_SCALING_FILTER_NEAREST_NEIGHBOR, +}; + +/** + * struct drm_plane_state - mutable plane state + * + * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and + * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the + * raw coordinates provided by userspace. Drivers should use + * drm_atomic_helper_check_plane_state() and only use the derived rectangles in + * @src and @dst to program the hardware. + */ +struct drm_plane_state { + /** @plane: backpointer to the plane */ + struct drm_plane *plane; + + /** + * @crtc: + * + * Currently bound CRTC, NULL if disabled. Do not this write directly, + * use drm_atomic_set_crtc_for_plane() + */ + struct drm_crtc *crtc; + + /** + * @fb: + * + * Currently bound framebuffer. Do not write this directly, use + * drm_atomic_set_fb_for_plane() + */ + struct drm_framebuffer *fb; + + /** + * @fence: + * + * Optional fence to wait for before scanning out @fb. The core atomic + * code will set this when userspace is using explicit fencing. Do not + * write this field directly for a driver's implicit fence. + * + * Drivers should store any implicit fence in this from their + * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb() + * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers. + */ + struct dma_fence *fence; + + /** + * @crtc_x: + * + * Left position of visible portion of plane on crtc, signed dest + * location allows it to be partially off screen. + */ + + int32_t crtc_x; + /** + * @crtc_y: + * + * Upper position of visible portion of plane on crtc, signed dest + * location allows it to be partially off screen. + */ + int32_t crtc_y; + + /** @crtc_w: width of visible portion of plane on crtc */ + /** @crtc_h: height of visible portion of plane on crtc */ + uint32_t crtc_w, crtc_h; + + /** + * @src_x: left position of visible portion of plane within plane (in + * 16.16 fixed point). + */ + uint32_t src_x; + /** + * @src_y: upper position of visible portion of plane within plane (in + * 16.16 fixed point). 
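Editor's aside (illustrative, not part of the upstream header): the source coordinates just described are 16.16 fixed point, so 0x00010000 represents exactly 1.0 pixel. A minimal sketch of the truncation a device without sub-pixel scanout precision would typically apply; the helper name is invented:

static inline unsigned int example_src_to_whole_pixels(uint32_t src_coord)
{
	/* high 16 bits: whole pixels, low 16 bits: fraction (dropped here) */
	return src_coord >> 16;
}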
+ */ + uint32_t src_y; + /** @src_w: width of visible portion of plane (in 16.16) */ + /** @src_h: height of visible portion of plane (in 16.16) */ + uint32_t src_h, src_w; + + /** + * @alpha: + * Opacity of the plane with 0 as completely transparent and 0xffff as + * completely opaque. See drm_plane_create_alpha_property() for more + * details. + */ + u16 alpha; + + /** + * @pixel_blend_mode: + * The alpha blending equation selection, describing how the pixels from + * the current plane are composited with the background. Value can be + * one of DRM_MODE_BLEND_* + */ + uint16_t pixel_blend_mode; + + /** + * @rotation: + * Rotation of the plane. See drm_plane_create_rotation_property() for + * more details. + */ + unsigned int rotation; + + /** + * @zpos: + * Priority of the given plane on crtc (optional). + * + * User-space may set mutable zpos properties so that multiple active + * planes on the same CRTC have identical zpos values. This is a + * user-space bug, but drivers can solve the conflict by comparing the + * plane object IDs; the plane with a higher ID is stacked on top of a + * plane with a lower ID. + * + * See drm_plane_create_zpos_property() and + * drm_plane_create_zpos_immutable_property() for more details. + */ + unsigned int zpos; + + /** + * @normalized_zpos: + * Normalized value of zpos: unique, range from 0 to N-1 where N is the + * number of active planes for given crtc. Note that the driver must set + * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to + * update this before it can be trusted. + */ + unsigned int normalized_zpos; + + /** + * @color_encoding: + * + * Color encoding for non RGB formats + */ + enum drm_color_encoding color_encoding; + + /** + * @color_range: + * + * Color range for non RGB formats + */ + enum drm_color_range color_range; + + /** + * @fb_damage_clips: + * + * Blob representing damage (area in plane framebuffer that changed + * since last plane update) as an array of &drm_mode_rect in framebuffer + * coodinates of the attached framebuffer. Note that unlike plane src, + * damage clips are not in 16.16 fixed point. + * + * See drm_plane_get_damage_clips() and + * drm_plane_get_damage_clips_count() for accessing these. + */ + struct drm_property_blob *fb_damage_clips; + + /** + * @src: + * + * source coordinates of the plane (in 16.16). + * + * When using drm_atomic_helper_check_plane_state(), + * the coordinates are clipped, but the driver may choose + * to use unclipped coordinates instead when the hardware + * performs the clipping automatically. + */ + /** + * @dst: + * + * clipped destination coordinates of the plane. + * + * When using drm_atomic_helper_check_plane_state(), + * the coordinates are clipped, but the driver may choose + * to use unclipped coordinates instead when the hardware + * performs the clipping automatically. + */ + struct drm_rect src, dst; + + /** + * @visible: + * + * Visibility of the plane. This can be false even if fb!=NULL and + * crtc!=NULL, due to clipping. + */ + bool visible; + + /** + * @scaling_filter: + * + * Scaling filter to be applied + */ + enum drm_scaling_filter scaling_filter; + + /** + * @commit: Tracks the pending commit to prevent use-after-free conditions, + * and for async plane updates. + * + * May be NULL. 
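Editor's aside: the struct's introduction says drivers should derive @src, @dst and @visible through drm_atomic_helper_check_plane_state() rather than program the raw coordinates. A minimal sketch of a &drm_plane_helper_funcs.atomic_check implementation doing exactly that; the function name is hypothetical, and the 1 << 16 scale limits simply mean "no scaling allowed" since scale factors are 16.16 fixed point:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;	/* plane is being disabled */

	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	/* Clips against the CRTC and fills in ->src, ->dst and ->visible. */
	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   1 << 16, 1 << 16,
						   false, false);
}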
+ */ + struct drm_crtc_commit *commit; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; +}; + +static inline struct drm_rect +drm_plane_state_src(const struct drm_plane_state *state) +{ + struct drm_rect src = { + .x1 = state->src_x, + .y1 = state->src_y, + .x2 = state->src_x + state->src_w, + .y2 = state->src_y + state->src_h, + }; + return src; +} + +static inline struct drm_rect +drm_plane_state_dest(const struct drm_plane_state *state) +{ + struct drm_rect dest = { + .x1 = state->crtc_x, + .y1 = state->crtc_y, + .x2 = state->crtc_x + state->crtc_w, + .y2 = state->crtc_y + state->crtc_h, + }; + return dest; +} + +/** + * struct drm_plane_funcs - driver plane control functions + */ +struct drm_plane_funcs { + /** + * @update_plane: + * + * This is the legacy entry point to enable and configure the plane for + * the given CRTC and framebuffer. It is never called to disable the + * plane, i.e. the passed-in crtc and fb paramters are never NULL. + * + * The source rectangle in frame buffer memory coordinates is given by + * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point + * values). Devices that don't support subpixel plane coordinates can + * ignore the fractional part. + * + * The destination rectangle in CRTC coordinates is given by the + * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values). + * Devices scale the source rectangle to the destination rectangle. If + * scaling is not supported, and the source rectangle size doesn't match + * the destination rectangle size, the driver must return a + * -<errorname>EINVAL</errorname> error. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_update_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*update_plane)(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @disable_plane: + * + * This is the legacy entry point to disable the plane. The DRM core + * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call + * with the frame buffer ID set to 0. Disabled planes must not be + * processed by the CRTC. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_disable_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*disable_plane)(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a plane cannot be hotplugged + * in DRM. + */ + void (*destroy)(struct drm_plane *plane); + + /** + * @reset: + * + * Reset plane hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_plane_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_plane *plane); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * plane. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. 
For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_plane *plane, + struct drm_property *property, uint64_t val); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this plane and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_plane_state should use + * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before &drm_plane.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. + */ + void (*atomic_destroy_state)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_plane_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. 
+ * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this plane). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ + int (*atomic_set_property)(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETPLANE IOCTL. + * + * Do not call this function directly, use + * drm_atomic_plane_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this plane). + */ + int (*atomic_get_property)(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val); + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the plane like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_plane *plane); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the plane from + * @late_register. It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + */ + void (*early_unregister)(struct drm_plane *plane); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_plane_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_plane_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_plane_state *state); + + /** + * @format_mod_supported: + * + * This optional hook is used for the DRM to determine if the given + * format/modifier combination is valid for the plane. This allows the + * DRM to generate the correct format bitmask (which formats apply to + * which modifier), and to validate modifiers at atomic_check time. + * + * If not present, then any modifier in the plane's modifier + * list is allowed with any of the plane's formats. + * + * Returns: + * + * True if the given modifier is valid for that format on the plane. + * False otherwise. + */ + bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format, + uint64_t modifier); +}; + +/** + * enum drm_plane_type - uapi plane type enumeration + * + * For historical reasons not all planes are made the same. This enumeration is + * used to tell the different types of planes apart to implement the different + * uapi semantics for them. For userspace which is universal plane aware and + * which is using that atomic IOCTL there's no difference between these planes + * (beyong what the driver and hardware can support of course). 
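Editor's aside before moving on to the plane types: a hedged sketch of how an atomic driver commonly fills the &struct drm_plane_funcs vtable documented above, delegating to the atomic helpers named in the individual hook descriptions. The table name is made up; @destroy is deliberately left unset so the same table also works with the managed drmm_universal_plane_alloc() described further down (which requires it to be NULL), whereas a driver using drm_universal_plane_init() on a static plane would typically set it to drm_plane_cleanup().

#include <drm/drm_atomic_helper.h>

static const struct drm_plane_funcs example_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};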
+ * + * For compatibility with legacy userspace, only overlay planes are made + * available to userspace by default. Userspace clients may set the + * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they + * wish to receive a universal plane list containing all plane types. See also + * drm_for_each_legacy_plane(). + * + * In addition to setting each plane's type, drivers need to setup the + * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy + * IOCTLs. See drm_crtc_init_with_planes(). + * + * WARNING: The values of this enum is UABI since they're exposed in the "type" + * property. + */ +enum drm_plane_type { + /** + * @DRM_PLANE_TYPE_OVERLAY: + * + * Overlay planes represent all non-primary, non-cursor planes. Some + * drivers refer to these types of planes as "sprites" internally. + */ + DRM_PLANE_TYPE_OVERLAY, + + /** + * @DRM_PLANE_TYPE_PRIMARY: + * + * A primary plane attached to a CRTC is the most likely to be able to + * light up the CRTC when no scaling/cropping is used and the plane + * covers the whole CRTC. + */ + DRM_PLANE_TYPE_PRIMARY, + + /** + * @DRM_PLANE_TYPE_CURSOR: + * + * A cursor plane attached to a CRTC is more likely to be able to be + * enabled when no scaling/cropping is used and the framebuffer has the + * size indicated by &drm_mode_config.cursor_width and + * &drm_mode_config.cursor_height. Additionally, if the driver doesn't + * support modifiers, the framebuffer should have a linear layout. + */ + DRM_PLANE_TYPE_CURSOR, +}; + + +/** + * struct drm_plane - central DRM plane control structure + * + * Planes represent the scanout hardware of a display block. They receive their + * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control + * the color conversion, see `Plane Composition Properties`_ for more details, + * and are also involved in the color conversion of input pixels, see `Color + * Management Properties`_ for details on that. + */ +struct drm_plane { + /** @dev: DRM device this plane belongs to */ + struct drm_device *dev; + + /** + * @head: + * + * List of all planes on @dev, linked from &drm_mode_config.plane_list. + * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ + struct list_head head; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: + * + * Protects modeset plane state, together with the &drm_crtc.mutex of + * CRTC this plane is linked to (when active, getting activated or + * getting disabled). + * + * For atomic drivers specifically this protects @state. + */ + struct drm_modeset_lock mutex; + + /** @base: base mode object */ + struct drm_mode_object base; + + /** + * @possible_crtcs: pipes this plane can be bound to constructed from + * drm_crtc_mask() + */ + uint32_t possible_crtcs; + /** @format_types: array of formats supported by this plane */ + uint32_t *format_types; + /** @format_count: Size of the array pointed at by @format_types. */ + unsigned int format_count; + /** + * @format_default: driver hasn't supplied supported formats for the + * plane. Used by the non-atomic driver compatibility wrapper only. + */ + bool format_default; + + /** @modifiers: array of modifiers supported by this plane */ + uint64_t *modifiers; + /** @modifier_count: Size of the array pointed at by @modifier_count. */ + unsigned int modifier_count; + + /** + * @crtc: + * + * Currently bound CRTC, only meaningful for non-atomic drivers. 
For + * atomic drivers this is forced to be NULL, atomic drivers should + * instead check &drm_plane_state.crtc. + */ + struct drm_crtc *crtc; + + /** + * @fb: + * + * Currently bound framebuffer, only meaningful for non-atomic drivers. + * For atomic drivers this is forced to be NULL, atomic drivers should + * instead check &drm_plane_state.fb. + */ + struct drm_framebuffer *fb; + + /** + * @old_fb: + * + * Temporary tracking of the old fb while a modeset is ongoing. Only + * used by non-atomic drivers, forced to be NULL for atomic drivers. + */ + struct drm_framebuffer *old_fb; + + /** @funcs: plane control functions */ + const struct drm_plane_funcs *funcs; + + /** @properties: property tracking for this plane */ + struct drm_object_properties properties; + + /** @type: Type of plane, see &enum drm_plane_type for details. */ + enum drm_plane_type type; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the plane. + */ + unsigned index; + + /** @helper_private: mid-layer private data */ + const struct drm_plane_helper_funcs *helper_private; + + /** + * @state: + * + * Current atomic state for this plane. + * + * This is protected by @mutex. Note that nonblocking atomic commits + * access the current plane state without taking locks. Either by going + * through the &struct drm_atomic_state pointers, see + * for_each_oldnew_plane_in_state(), for_each_old_plane_in_state() and + * for_each_new_plane_in_state(). Or through careful ordering of atomic + * commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. + */ + struct drm_plane_state *state; + + /** + * @alpha_property: + * Optional alpha property for this plane. See + * drm_plane_create_alpha_property(). + */ + struct drm_property *alpha_property; + /** + * @zpos_property: + * Optional zpos property for this plane. See + * drm_plane_create_zpos_property(). + */ + struct drm_property *zpos_property; + /** + * @rotation_property: + * Optional rotation property for this plane. See + * drm_plane_create_rotation_property(). + */ + struct drm_property *rotation_property; + /** + * @blend_mode_property: + * Optional "pixel blend mode" enum property for this plane. + * Blend mode property represents the alpha blending equation selection, + * describing how the pixels from the current plane are composited with + * the background. + */ + struct drm_property *blend_mode_property; + + /** + * @color_encoding_property: + * + * Optional "COLOR_ENCODING" enum property for specifying + * color encoding for non RGB formats. + * See drm_plane_create_color_properties(). + */ + struct drm_property *color_encoding_property; + /** + * @color_range_property: + * + * Optional "COLOR_RANGE" enum property for specifying + * color range for non RGB formats. + * See drm_plane_create_color_properties(). + */ + struct drm_property *color_range_property; + + /** + * @scaling_filter_property: property to apply a particular filter while + * scaling. 
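Editor's aside: drivers rarely use a bare struct drm_plane; it is almost always embedded in a driver-specific wrapper and recovered with container_of(). A minimal sketch, all names hypothetical:

#include <linux/container_of.h>

struct example_plane {
	struct drm_plane base;
	void __iomem *regs;	/* hypothetical per-plane register window */
};

static inline struct example_plane *to_example_plane(struct drm_plane *plane)
{
	return container_of(plane, struct example_plane, base);
}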
+ */ + struct drm_property *scaling_filter_property; +}; + +#define obj_to_plane(x) container_of(x, struct drm_plane, base) + +__printf(9, 10) +int drm_universal_plane_init(struct drm_device *dev, + struct drm_plane *plane, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, + unsigned int format_count, + const uint64_t *format_modifiers, + enum drm_plane_type type, + const char *name, ...); +void drm_plane_cleanup(struct drm_plane *plane); + +__printf(10, 11) +void *__drmm_universal_plane_alloc(struct drm_device *dev, + size_t size, size_t offset, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, + unsigned int format_count, + const uint64_t *format_modifiers, + enum drm_plane_type plane_type, + const char *name, ...); + +/** + * drmm_universal_plane_alloc - Allocate and initialize an universal plane object + * @dev: DRM device + * @type: the type of the struct which contains struct &drm_plane + * @member: the name of the &drm_plane within @type + * @possible_crtcs: bitmask of possible CRTCs + * @funcs: callbacks for the new plane + * @formats: array of supported formats (DRM_FORMAT\_\*) + * @format_count: number of elements in @formats + * @format_modifiers: array of struct drm_format modifiers terminated by + * DRM_FORMAT_MOD_INVALID + * @plane_type: type of plane (overlay, primary, cursor) + * @name: printf style format string for the plane name, or NULL for default name + * + * Allocates and initializes a plane object of type @type. Cleanup is + * automatically handled through registering drm_plane_cleanup() with + * drmm_add_action(). + * + * The @drm_plane_funcs.destroy hook must be NULL. + * + * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier support may set + * @format_modifiers to NULL. The plane will advertise the linear modifier. + * + * Returns: + * Pointer to new plane, or ERR_PTR on failure. + */ +#define drmm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \ + format_count, format_modifiers, plane_type, name, ...) \ + ((type *)__drmm_universal_plane_alloc(dev, sizeof(type), \ + offsetof(type, member), \ + possible_crtcs, funcs, formats, \ + format_count, format_modifiers, \ + plane_type, name, ##__VA_ARGS__)) + +__printf(10, 11) +void *__drm_universal_plane_alloc(struct drm_device *dev, + size_t size, size_t offset, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, + unsigned int format_count, + const uint64_t *format_modifiers, + enum drm_plane_type plane_type, + const char *name, ...); + +/** + * drm_universal_plane_alloc() - Allocate and initialize an universal plane object + * @dev: DRM device + * @type: the type of the struct which contains struct &drm_plane + * @member: the name of the &drm_plane within @type + * @possible_crtcs: bitmask of possible CRTCs + * @funcs: callbacks for the new plane + * @formats: array of supported formats (DRM_FORMAT\_\*) + * @format_count: number of elements in @formats + * @format_modifiers: array of struct drm_format modifiers terminated by + * DRM_FORMAT_MOD_INVALID + * @plane_type: type of plane (overlay, primary, cursor) + * @name: printf style format string for the plane name, or NULL for default name + * + * Allocates and initializes a plane object of type @type. The caller + * is responsible for releasing the allocated memory with kfree(). + * + * Drivers are encouraged to use drmm_universal_plane_alloc() instead. 
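Editor's aside: a hedged usage sketch for the managed allocator defined above, reusing the hypothetical example_plane wrapper and example_plane_funcs table from the earlier asides. The format list and the assumption that this primary plane is only usable on the first CRTC are likewise illustrative:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <drm/drm_fourcc.h>

static const u32 example_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static int example_create_primary_plane(struct drm_device *drm)
{
	struct example_plane *ep;

	ep = drmm_universal_plane_alloc(drm, struct example_plane, base,
					BIT(0), /* CRTC 0 only */
					&example_plane_funcs,
					example_formats,
					ARRAY_SIZE(example_formats),
					NULL, /* linear modifier only */
					DRM_PLANE_TYPE_PRIMARY, NULL);
	return PTR_ERR_OR_ZERO(ep);
}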
+ * + * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier support may set + * @format_modifiers to NULL. The plane will advertise the linear modifier. + * + * Returns: + * Pointer to new plane, or ERR_PTR on failure. + */ +#define drm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \ + format_count, format_modifiers, plane_type, name, ...) \ + ((type *)__drm_universal_plane_alloc(dev, sizeof(type), \ + offsetof(type, member), \ + possible_crtcs, funcs, formats, \ + format_count, format_modifiers, \ + plane_type, name, ##__VA_ARGS__)) + +/** + * drm_plane_index - find the index of a registered plane + * @plane: plane to find index for + * + * Given a registered plane, return the index of that plane within a DRM + * device's list of planes. + */ +static inline unsigned int drm_plane_index(const struct drm_plane *plane) +{ + return plane->index; +} + +/** + * drm_plane_mask - find the mask of a registered plane + * @plane: plane to find mask for + */ +static inline u32 drm_plane_mask(const struct drm_plane *plane) +{ + return 1 << drm_plane_index(plane); +} + +struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); +void drm_plane_force_disable(struct drm_plane *plane); + +int drm_mode_plane_set_obj_prop(struct drm_plane *plane, + struct drm_property *property, + uint64_t value); + +/** + * drm_plane_find - find a &drm_plane + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: plane id + * + * Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around + * drm_mode_object_find(). + */ +static inline struct drm_plane *drm_plane_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PLANE); + return mo ? obj_to_plane(mo) : NULL; +} + +/** + * drm_for_each_plane_mask - iterate over planes specified by bitmask + * @plane: the loop cursor + * @dev: the DRM device + * @plane_mask: bitmask of plane indices + * + * Iterate over all planes specified by bitmask. + */ +#define drm_for_each_plane_mask(plane, dev, plane_mask) \ + list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ + for_each_if ((plane_mask) & drm_plane_mask(plane)) + +/** + * drm_for_each_legacy_plane - iterate over all planes for legacy userspace + * @plane: the loop cursor + * @dev: the DRM device + * + * Iterate over all legacy planes of @dev, excluding primary and cursor planes. + * This is useful for implementing userspace apis when userspace is not + * universal plane aware. See also &enum drm_plane_type. + */ +#define drm_for_each_legacy_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ + for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY) + +/** + * drm_for_each_plane - iterate over all planes + * @plane: the loop cursor + * @dev: the DRM device + * + * Iterate over all planes of @dev, include primary and cursor planes. 
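Editor's aside on the iteration helpers above (names hypothetical): drm_plane_mask() values compose into the same kind of bitmask that drm_for_each_plane_mask() consumes, for example to remember every overlay plane on a device:

static u32 example_overlay_plane_mask(struct drm_device *dev)
{
	struct drm_plane *plane;
	u32 mask = 0;

	drm_for_each_plane(plane, dev) {
		if (plane->type == DRM_PLANE_TYPE_OVERLAY)
			mask |= drm_plane_mask(plane);
	}

	/* The result can later be walked with drm_for_each_plane_mask(). */
	return mask;
}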
+ */ +#define drm_for_each_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) + +bool drm_any_plane_has_format(struct drm_device *dev, + u32 format, u64 modifier); + +void drm_plane_enable_fb_damage_clips(struct drm_plane *plane); +unsigned int +drm_plane_get_damage_clips_count(const struct drm_plane_state *state); +struct drm_mode_rect * +drm_plane_get_damage_clips(const struct drm_plane_state *state); + +int drm_plane_create_scaling_filter_property(struct drm_plane *plane, + unsigned int supported_filters); + +#endif diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h new file mode 100644 index 000000000..3a574e8cd --- /dev/null +++ b/include/drm/drm_plane_helper.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2011-2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_PLANE_HELPER_H +#define DRM_PLANE_HELPER_H + +#include <linux/types.h> + +struct drm_atomic_state; +struct drm_crtc; +struct drm_framebuffer; +struct drm_modeset_acquire_ctx; +struct drm_plane; + +int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx); +int drm_plane_helper_disable_primary(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx); +void drm_plane_helper_destroy(struct drm_plane *plane); +int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state); + +/** + * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers + * + * This macro initializes plane functions for non-atomic drivers to default + * values. Non-atomic interfaces are deprecated and should not be used in new + * drivers. + */ +#define DRM_PLANE_NON_ATOMIC_FUNCS \ + .update_plane = drm_plane_helper_update_primary, \ + .disable_plane = drm_plane_helper_disable_primary, \ + .destroy = drm_plane_helper_destroy + +#endif diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h new file mode 100644 index 000000000..2a1d01e5b --- /dev/null +++ b/include/drm/drm_prime.h @@ -0,0 +1,113 @@ +/* + * Copyright © 2012 Red Hat + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. 
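Editor's aside on the damage-clip helpers declared near the end of drm_plane.h above: clips are only present if the driver opted in with drm_plane_enable_fb_damage_clips() and userspace attached the FB_DAMAGE_CLIPS property; with no clips the whole plane should be treated as damaged. A minimal sketch (function name invented, struct drm_mode_rect comes from the uapi drm_mode.h):

#include <linux/printk.h>
#include <uapi/drm/drm_mode.h>

static void example_log_damage(const struct drm_plane_state *state)
{
	struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
	unsigned int i, num_clips = drm_plane_get_damage_clips_count(state);

	for (i = 0; i < num_clips; i++)
		pr_debug("damage clip %u: (%d, %d)-(%d, %d)\n", i,
			 clips[i].x1, clips[i].y1, clips[i].x2, clips[i].y2);
}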
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Rob Clark <rob.clark@linaro.org> + * + */ + +#ifndef __DRM_PRIME_H__ +#define __DRM_PRIME_H__ + +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/scatterlist.h> + +/** + * struct drm_prime_file_private - per-file tracking for PRIME + * + * This just contains the internal &struct dma_buf and handle caches for each + * &struct drm_file used by the PRIME core code. + */ +struct drm_prime_file_private { +/* private: */ + struct mutex lock; + struct rb_root dmabufs; + struct rb_root handles; +}; + +struct device; + +struct dma_buf_export_info; +struct dma_buf; +struct dma_buf_attachment; +struct iosys_map; + +enum dma_data_direction; + +struct drm_device; +struct drm_gem_object; +struct drm_file; + +/* core prime functions */ +struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, + struct dma_buf_export_info *exp_info); +void drm_gem_dmabuf_release(struct dma_buf *dma_buf); + +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle); +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, uint32_t flags, + int *prime_fd); + +/* helper functions for exporting */ +int drm_gem_map_attach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach); +void drm_gem_map_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach); +struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir); +void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir); +int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map); +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map); + +int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); + +struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, unsigned int nr_pages); +struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, + int flags); + +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt); + +/* helper functions for importing */ +struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, + struct dma_buf *dma_buf, + struct device *attach_dev); +struct 
drm_gem_object *drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); + +int drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages, + int max_pages); +int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs, + int max_pages); + +#endif /* __DRM_PRIME_H__ */ diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h new file mode 100644 index 000000000..094ded235 --- /dev/null +++ b/include/drm/drm_print.h @@ -0,0 +1,640 @@ +/* + * Copyright (C) 2016 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + */ + +#ifndef DRM_PRINT_H_ +#define DRM_PRINT_H_ + +#include <linux/compiler.h> +#include <linux/printk.h> +#include <linux/seq_file.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/dynamic_debug.h> + +#include <drm/drm.h> + +/* Do *not* use outside of drm_print.[ch]! */ +extern unsigned long __drm_debug; + +/** + * DOC: print + * + * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same + * debug code to be used for both debugfs and printk logging. + * + * For example:: + * + * void log_some_info(struct drm_printer *p) + * { + * drm_printf(p, "foo=%d\n", foo); + * drm_printf(p, "bar=%d\n", bar); + * } + * + * #ifdef CONFIG_DEBUG_FS + * void debugfs_show(struct seq_file *f) + * { + * struct drm_printer p = drm_seq_file_printer(f); + * log_some_info(&p); + * } + * #endif + * + * void some_other_function(...) + * { + * struct drm_printer p = drm_info_printer(drm->dev); + * log_some_info(&p); + * } + */ + +/** + * struct drm_printer - drm output "stream" + * + * Do not use struct members directly. Use drm_printer_seq_file(), + * drm_printer_info(), etc to initialize. And drm_printf() for output. 
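+ *
+ * A minimal usage sketch (illustrative only; ``drm`` and ``count`` are
+ * assumed to be in scope)::
+ *
+ *	struct drm_printer p = drm_info_printer(drm->dev);
+ *
+ *	drm_printf(&p, "vblank count: %u\n", count);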
+ */ +struct drm_printer { + /* private: */ + void (*printfn)(struct drm_printer *p, struct va_format *vaf); + void (*puts)(struct drm_printer *p, const char *str); + void *arg; + const char *prefix; +}; + +void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf); +void __drm_puts_coredump(struct drm_printer *p, const char *str); +void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); +void __drm_puts_seq_file(struct drm_printer *p, const char *str); +void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf); + +__printf(2, 3) +void drm_printf(struct drm_printer *p, const char *f, ...); +void drm_puts(struct drm_printer *p, const char *str); +void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset); +void drm_print_bits(struct drm_printer *p, unsigned long value, + const char * const bits[], unsigned int nbits); + +__printf(2, 0) +/** + * drm_vprintf - print to a &drm_printer stream + * @p: the &drm_printer + * @fmt: format string + * @va: the va_list + */ +static inline void +drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) +{ + struct va_format vaf = { .fmt = fmt, .va = va }; + + p->printfn(p, &vaf); +} + +/** + * drm_printf_indent - Print to a &drm_printer stream with indentation + * @printer: DRM printer + * @indent: Tab indentation level (max 5) + * @fmt: Format string + */ +#define drm_printf_indent(printer, indent, fmt, ...) \ + drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) + +/** + * struct drm_print_iterator - local struct used with drm_printer_coredump + * @data: Pointer to the devcoredump output buffer + * @start: The offset within the buffer to start writing + * @remain: The number of bytes to write for this iteration + */ +struct drm_print_iterator { + void *data; + ssize_t start; + ssize_t remain; + /* private: */ + ssize_t offset; +}; + +/** + * drm_coredump_printer - construct a &drm_printer that can output to a buffer + * from the read function for devcoredump + * @iter: A pointer to a struct drm_print_iterator for the read instance + * + * This wrapper extends drm_printf() to work with a dev_coredumpm() callback + * function. The passed in drm_print_iterator struct contains the buffer + * pointer, size and offset as passed in from devcoredump. + * + * For example:: + * + * void coredump_read(char *buffer, loff_t offset, size_t count, + * void *data, size_t datalen) + * { + * struct drm_print_iterator iter; + * struct drm_printer p; + * + * iter.data = buffer; + * iter.start = offset; + * iter.remain = count; + * + * p = drm_coredump_printer(&iter); + * + * drm_printf(p, "foo=%d\n", foo); + * } + * + * void makecoredump(...) + * { + * ... + * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL, + * coredump_read, ...) 
+ * } + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer +drm_coredump_printer(struct drm_print_iterator *iter) +{ + struct drm_printer p = { + .printfn = __drm_printfn_coredump, + .puts = __drm_puts_coredump, + .arg = iter, + }; + + /* Set the internal offset of the iterator to zero */ + iter->offset = 0; + + return p; +} + +/** + * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file + * @f: the &struct seq_file to output to + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) +{ + struct drm_printer p = { + .printfn = __drm_printfn_seq_file, + .puts = __drm_puts_seq_file, + .arg = f, + }; + return p; +} + +/** + * drm_info_printer - construct a &drm_printer that outputs to dev_printk() + * @dev: the &struct device pointer + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_info_printer(struct device *dev) +{ + struct drm_printer p = { + .printfn = __drm_printfn_info, + .arg = dev, + }; + return p; +} + +/** + * drm_debug_printer - construct a &drm_printer that outputs to pr_debug() + * @prefix: debug output prefix + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_debug_printer(const char *prefix) +{ + struct drm_printer p = { + .printfn = __drm_printfn_debug, + .prefix = prefix + }; + return p; +} + +/** + * drm_err_printer - construct a &drm_printer that outputs to pr_err() + * @prefix: debug output prefix + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_err_printer(const char *prefix) +{ + struct drm_printer p = { + .printfn = __drm_printfn_err, + .prefix = prefix + }; + return p; +} + +/** + * enum drm_debug_category - The DRM debug categories + * + * Each of the DRM debug logging macros use a specific category, and the logging + * is filtered by the drm.debug module parameter. This enum specifies the values + * for the interface. + * + * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except + * DRM_DEBUG() logs to DRM_UT_CORE. + * + * Enabling verbose debug messages is done through the drm.debug parameter, each + * category being enabled by a bit: + * + * - drm.debug=0x1 will enable CORE messages + * - drm.debug=0x2 will enable DRIVER messages + * - drm.debug=0x3 will enable CORE and DRIVER messages + * - ... + * - drm.debug=0x1ff will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node:: + * + * # echo 0xf > /sys/module/drm/parameters/debug + * + */ +enum drm_debug_category { + /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */ + /** + * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, + * drm_memory.c, ... + */ + DRM_UT_CORE, + /** + * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915, + * radeon, ... macro. + */ + DRM_UT_DRIVER, + /** + * @DRM_UT_KMS: Used in the modesetting code. + */ + DRM_UT_KMS, + /** + * @DRM_UT_PRIME: Used in the prime code. + */ + DRM_UT_PRIME, + /** + * @DRM_UT_ATOMIC: Used in the atomic code. + */ + DRM_UT_ATOMIC, + /** + * @DRM_UT_VBL: Used for verbose debug message in the vblank code. + */ + DRM_UT_VBL, + /** + * @DRM_UT_STATE: Used for verbose atomic state debugging. + */ + DRM_UT_STATE, + /** + * @DRM_UT_LEASE: Used in the lease code. + */ + DRM_UT_LEASE, + /** + * @DRM_UT_DP: Used in the DP code. 
+ */ + DRM_UT_DP, + /** + * @DRM_UT_DRMRES: Used in the drm managed resources code. + */ + DRM_UT_DRMRES +}; + +static inline bool drm_debug_enabled_raw(enum drm_debug_category category) +{ + return unlikely(__drm_debug & BIT(category)); +} + +#define drm_debug_enabled_instrumented(category) \ + ({ \ + pr_debug("todo: is this frequent enough to optimize ?\n"); \ + drm_debug_enabled_raw(category); \ + }) + +#if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) +/* + * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets + * a descriptor, and only enabled callsites are reachable. They use + * the private macro to avoid re-testing the enable-bit. + */ +#define __drm_debug_enabled(category) true +#define drm_debug_enabled(category) drm_debug_enabled_instrumented(category) +#else +#define __drm_debug_enabled(category) drm_debug_enabled_raw(category) +#define drm_debug_enabled(category) drm_debug_enabled_raw(category) +#endif + +/* + * struct device based logging + * + * Prefer drm_device based logging over device or printk based logging. + */ + +__printf(3, 4) +void drm_dev_printk(const struct device *dev, const char *level, + const char *format, ...); +struct _ddebug; +__printf(4, 5) +void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev, + enum drm_debug_category category, const char *format, ...); + +/** + * DRM_DEV_ERROR() - Error output. + * + * NOTE: this is deprecated in favor of drm_err() or dev_err(). + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__) + +/** + * DRM_DEV_ERROR_RATELIMITED() - Rate limited error output. + * + * NOTE: this is deprecated in favor of drm_err_ratelimited() or + * dev_err_ratelimited(). + * + * @dev: device pointer + * @fmt: printf() like format string. + * + * Like DRM_ERROR() but won't flood the log. + */ +#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ +}) + +/* NOTE: this is deprecated in favor of drm_info() or dev_info(). */ +#define DRM_DEV_INFO(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */ +#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + if (!__print_once) { \ + __print_once = true; \ + DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ + } \ +}) + +#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) +#define drm_dev_dbg(dev, cat, fmt, ...) \ + __drm_dev_dbg(NULL, dev, cat, fmt, ##__VA_ARGS__) +#else +#define drm_dev_dbg(dev, cat, fmt, ...) \ + _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \ + dev, cat, fmt, ##__VA_ARGS__) +#endif + +/** + * DRM_DEV_DEBUG() - Debug output for generic drm code + * + * NOTE: this is deprecated in favor of drm_dbg_core(). + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__) +/** + * DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver + * + * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg(). + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) 
\ + drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) +/** + * DRM_DEV_DEBUG_KMS() - Debug output for modesetting code + * + * NOTE: this is deprecated in favor of drm_dbg_kms(). + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) + +/* + * struct drm_device based logging + * + * Prefer drm_device based logging over device or prink based logging. + */ + +/* Helper for struct drm_device based logging. */ +#define __drm_printk(drm, level, type, fmt, ...) \ + dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__) + + +#define drm_info(drm, fmt, ...) \ + __drm_printk((drm), info,, fmt, ##__VA_ARGS__) + +#define drm_notice(drm, fmt, ...) \ + __drm_printk((drm), notice,, fmt, ##__VA_ARGS__) + +#define drm_warn(drm, fmt, ...) \ + __drm_printk((drm), warn,, fmt, ##__VA_ARGS__) + +#define drm_err(drm, fmt, ...) \ + __drm_printk((drm), err,, "*ERROR* " fmt, ##__VA_ARGS__) + + +#define drm_info_once(drm, fmt, ...) \ + __drm_printk((drm), info, _once, fmt, ##__VA_ARGS__) + +#define drm_notice_once(drm, fmt, ...) \ + __drm_printk((drm), notice, _once, fmt, ##__VA_ARGS__) + +#define drm_warn_once(drm, fmt, ...) \ + __drm_printk((drm), warn, _once, fmt, ##__VA_ARGS__) + +#define drm_err_once(drm, fmt, ...) \ + __drm_printk((drm), err, _once, "*ERROR* " fmt, ##__VA_ARGS__) + + +#define drm_err_ratelimited(drm, fmt, ...) \ + __drm_printk((drm), err, _ratelimited, "*ERROR* " fmt, ##__VA_ARGS__) + + +#define drm_dbg_core(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__) +#define drm_dbg_driver(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) +#define drm_dbg_kms(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__) +#define drm_dbg_prime(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__) +#define drm_dbg_atomic(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) +#define drm_dbg_vbl(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__) +#define drm_dbg_state(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__) +#define drm_dbg_lease(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__) +#define drm_dbg_dp(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__) +#define drm_dbg_drmres(drm, fmt, ...) \ + drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) + +#define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__) + +/* + * printk based logging + * + * Prefer drm_device based logging over device or prink based logging. + */ + +__printf(3, 4) +void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...); +__printf(1, 2) +void __drm_err(const char *format, ...); + +#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) +#define __drm_dbg(cat, fmt, ...) ___drm_dbg(NULL, cat, fmt, ##__VA_ARGS__) +#else +#define __drm_dbg(cat, fmt, ...) \ + _dynamic_func_call_cls(cat, fmt, ___drm_dbg, \ + cat, fmt, ##__VA_ARGS__) +#endif + +/* Macros to make printk easier */ + +#define _DRM_PRINTK(once, level, fmt, ...) \ + printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of pr_info(). */ +#define DRM_INFO(fmt, ...) 
\ + _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) +/* NOTE: this is deprecated in favor of pr_notice(). */ +#define DRM_NOTE(fmt, ...) \ + _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) +/* NOTE: this is deprecated in favor of pr_warn(). */ +#define DRM_WARN(fmt, ...) \ + _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of pr_info_once(). */ +#define DRM_INFO_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) +/* NOTE: this is deprecated in favor of pr_notice_once(). */ +#define DRM_NOTE_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) +/* NOTE: this is deprecated in favor of pr_warn_once(). */ +#define DRM_WARN_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of pr_err(). */ +#define DRM_ERROR(fmt, ...) \ + __drm_err(fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of pr_err_ratelimited(). */ +#define DRM_ERROR_RATELIMITED(fmt, ...) \ + DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */ +#define DRM_DEBUG(fmt, ...) \ + __drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */ +#define DRM_DEBUG_DRIVER(fmt, ...) \ + __drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */ +#define DRM_DEBUG_KMS(fmt, ...) \ + __drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */ +#define DRM_DEBUG_PRIME(fmt, ...) \ + __drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */ +#define DRM_DEBUG_ATOMIC(fmt, ...) \ + __drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */ +#define DRM_DEBUG_VBL(fmt, ...) \ + __drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */ +#define DRM_DEBUG_LEASE(fmt, ...) \ + __drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */ +#define DRM_DEBUG_DP(fmt, ...) \ + __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) + +#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\ + const struct drm_device *drm_ = (drm); \ + \ + if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \ + drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \ +}) + +#define drm_dbg_kms_ratelimited(drm, fmt, ...) \ + __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__) + +/* NOTE: this is deprecated in favor of drm_dbg_kms_ratelimited(NULL, ...). */ +#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__) + +/* + * struct drm_device based WARNs + * + * drm_WARN*() acts like WARN*(), but with the key difference of + * using device specific information so that we know from which device + * warning is originating from. + * + * Prefer drm_device based drm_WARN* over regular WARN* + */ + +/* Helper for struct drm_device based WARNs */ +#define drm_WARN(drm, condition, format, arg...) \ + WARN(condition, "%s %s: " format, \ + dev_driver_string((drm)->dev), \ + dev_name((drm)->dev), ## arg) + +#define drm_WARN_ONCE(drm, condition, format, arg...) 
\ + WARN_ONCE(condition, "%s %s: " format, \ + dev_driver_string((drm)->dev), \ + dev_name((drm)->dev), ## arg) + +#define drm_WARN_ON(drm, x) \ + drm_WARN((drm), (x), "%s", \ + "drm_WARN_ON(" __stringify(x) ")") + +#define drm_WARN_ON_ONCE(drm, x) \ + drm_WARN_ONCE((drm), (x), "%s", \ + "drm_WARN_ON_ONCE(" __stringify(x) ")") + +#endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_privacy_screen_consumer.h b/include/drm/drm_privacy_screen_consumer.h new file mode 100644 index 000000000..7f66a90d1 --- /dev/null +++ b/include/drm/drm_privacy_screen_consumer.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_PRIVACY_SCREEN_CONSUMER_H__ +#define __DRM_PRIVACY_SCREEN_CONSUMER_H__ + +#include <linux/device.h> +#include <drm/drm_connector.h> + +struct drm_privacy_screen; + +#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN) +struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev, + const char *con_id); +void drm_privacy_screen_put(struct drm_privacy_screen *priv); + +int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status sw_state); +void drm_privacy_screen_get_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status *sw_state_ret, + enum drm_privacy_screen_status *hw_state_ret); + +int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb); +int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb); +#else +static inline struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev, + const char *con_id) +{ + return ERR_PTR(-ENODEV); +} +static inline void drm_privacy_screen_put(struct drm_privacy_screen *priv) +{ +} +static inline int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status sw_state) +{ + return -ENODEV; +} +static inline void drm_privacy_screen_get_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status *sw_state_ret, + enum drm_privacy_screen_status *hw_state_ret) +{ + *sw_state_ret = PRIVACY_SCREEN_DISABLED; + *hw_state_ret = PRIVACY_SCREEN_DISABLED; +} +static inline int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb) +{ + return -ENODEV; +} +static inline int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/include/drm/drm_privacy_screen_driver.h b/include/drm/drm_privacy_screen_driver.h new file mode 100644 index 000000000..4ef246d57 --- /dev/null +++ b/include/drm/drm_privacy_screen_driver.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_PRIVACY_SCREEN_DRIVER_H__ +#define __DRM_PRIVACY_SCREEN_DRIVER_H__ + +#include <linux/device.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <drm/drm_connector.h> + +struct drm_privacy_screen; + +/** + * struct drm_privacy_screen_ops - drm_privacy_screen operations + * + * Defines the operations which the privacy-screen class code may call. + * These functions should be implemented by the privacy-screen driver. + */ +struct drm_privacy_screen_ops { + /** + * @set_sw_state: Called to request a change of the privacy-screen + * state. 
The privacy-screen class code contains a check to avoid this + * getting called when the hw_state reports the state is locked. + * It is the driver's responsibility to update sw_state and hw_state. + * This is always called with the drm_privacy_screen's lock held. + */ + int (*set_sw_state)(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status sw_state); + /** + * @get_hw_state: Called to request that the driver gets the current + * privacy-screen state from the hardware and then updates sw_state and + * hw_state accordingly. This will be called by the core just before + * the privacy-screen is registered in sysfs. + */ + void (*get_hw_state)(struct drm_privacy_screen *priv); +}; + +/** + * struct drm_privacy_screen - central privacy-screen structure + * + * Central privacy-screen structure, this contains the struct device used + * to register the screen in sysfs, the screen's state, ops, etc. + */ +struct drm_privacy_screen { + /** @dev: device used to register the privacy-screen in sysfs. */ + struct device dev; + /** @lock: mutex protection all fields in this struct. */ + struct mutex lock; + /** @list: privacy-screen devices list list-entry. */ + struct list_head list; + /** @notifier_head: privacy-screen notifier head. */ + struct blocking_notifier_head notifier_head; + /** + * @ops: &struct drm_privacy_screen_ops for this privacy-screen. + * This is NULL if the driver has unregistered the privacy-screen. + */ + const struct drm_privacy_screen_ops *ops; + /** + * @sw_state: The privacy-screen's software state, see + * :ref:`Standard Connector Properties<standard_connector_properties>` + * for more info. + */ + enum drm_privacy_screen_status sw_state; + /** + * @hw_state: The privacy-screen's hardware state, see + * :ref:`Standard Connector Properties<standard_connector_properties>` + * for more info. + */ + enum drm_privacy_screen_status hw_state; + /** + * @drvdata: Private data owned by the privacy screen provider + */ + void *drvdata; +}; + +static inline +void *drm_privacy_screen_get_drvdata(struct drm_privacy_screen *priv) +{ + return priv->drvdata; +} + +struct drm_privacy_screen *drm_privacy_screen_register( + struct device *parent, const struct drm_privacy_screen_ops *ops, + void *data); +void drm_privacy_screen_unregister(struct drm_privacy_screen *priv); + +void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv); + +#endif diff --git a/include/drm/drm_privacy_screen_machine.h b/include/drm/drm_privacy_screen_machine.h new file mode 100644 index 000000000..02e537190 --- /dev/null +++ b/include/drm/drm_privacy_screen_machine.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_PRIVACY_SCREEN_MACHINE_H__ +#define __DRM_PRIVACY_SCREEN_MACHINE_H__ + +#include <linux/list.h> + +/** + * struct drm_privacy_screen_lookup - static privacy-screen lookup list entry + * + * Used for the static lookup-list for mapping privacy-screen consumer + * dev-connector pairs to a privacy-screen provider. + */ +struct drm_privacy_screen_lookup { + /** @list: Lookup list list-entry. */ + struct list_head list; + /** @dev_id: Consumer device name or NULL to match all devices. */ + const char *dev_id; + /** @con_id: Consumer connector name or NULL to match all connectors. */ + const char *con_id; + /** @provider: dev_name() of the privacy_screen provider. 
*/ + const char *provider; +}; + +void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup); +void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup); + +#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN) && IS_ENABLED(CONFIG_X86) +void drm_privacy_screen_lookup_init(void); +void drm_privacy_screen_lookup_exit(void); +#else +static inline void drm_privacy_screen_lookup_init(void) +{ +} +static inline void drm_privacy_screen_lookup_exit(void) +{ +} +#endif + +#endif diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h new file mode 100644 index 000000000..5880daa14 --- /dev/null +++ b/include/drm/drm_probe_helper.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +#ifndef __DRM_PROBE_HELPER_H__ +#define __DRM_PROBE_HELPER_H__ + +#include <drm/drm_modes.h> + +struct drm_connector; +struct drm_crtc; +struct drm_device; +struct drm_modeset_acquire_ctx; + +int drm_helper_probe_single_connector_modes(struct drm_connector + *connector, uint32_t maxX, + uint32_t maxY); +int drm_helper_probe_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force); +void drm_kms_helper_poll_init(struct drm_device *dev); +void drm_kms_helper_poll_fini(struct drm_device *dev); +bool drm_helper_hpd_irq_event(struct drm_device *dev); +bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector); +void drm_kms_helper_hotplug_event(struct drm_device *dev); +void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector); + +void drm_kms_helper_poll_disable(struct drm_device *dev); +void drm_kms_helper_poll_enable(struct drm_device *dev); +bool drm_kms_helper_is_poll_worker(void); + +enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + const struct drm_display_mode *fixed_mode); + +int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector); +int drm_connector_helper_get_modes_fixed(struct drm_connector *connector, + const struct drm_display_mode *fixed_mode); +int drm_connector_helper_get_modes(struct drm_connector *connector); + +#endif diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h new file mode 100644 index 000000000..65bc9710a --- /dev/null +++ b/include/drm/drm_property.h @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef __DRM_PROPERTY_H__ +#define __DRM_PROPERTY_H__ + +#include <linux/list.h> +#include <linux/ctype.h> +#include <drm/drm_mode_object.h> + +#include <uapi/drm/drm_mode.h> + +/** + * struct drm_property_enum - symbolic values for enumerations + * @head: list of enum values, linked to &drm_property.enum_list + * @name: symbolic name for the enum + * + * For enumeration and bitmask properties this structure stores the symbolic + * decoding for each value. This is used for example for the rotation property. + */ +struct drm_property_enum { + /** + * @value: numeric property value for this enum entry + * + * If the property has the type &DRM_MODE_PROP_BITMASK, @value stores a + * bitshift, not a bitmask. In other words, the enum entry is enabled + * if the bit number @value is set in the property's value. This enum + * entry has the bitmask ``1 << value``. + */ + uint64_t value; + struct list_head head; + char name[DRM_PROP_NAME_LEN]; +}; + +/** + * struct drm_property - modeset object property + * + * This structure represent a modeset object property. It combines both the name + * of the property with the set of permissible values. This means that when a + * driver wants to use a property with the same name on different objects, but + * with different value ranges, then it must create property for each one. An + * example would be rotation of &drm_plane, when e.g. the primary plane cannot + * be rotated. But if both the name and the value range match, then the same + * property structure can be instantiated multiple times for the same object. + * Userspace must be able to cope with this and cannot assume that the same + * symbolic property will have the same modeset object ID on all modeset + * objects. + * + * Properties are created by one of the special functions, as explained in + * detail in the @flags structure member. + * + * To actually expose a property it must be attached to each object using + * drm_object_attach_property(). Currently properties can only be attached to + * &drm_connector, &drm_crtc and &drm_plane. + * + * Properties are also used as the generic metadatatransport for the atomic + * IOCTL. Everything that was set directly in structures in the legacy modeset + * IOCTLs (like the plane source or destination windows, or e.g. the links to + * the CRTC) is exposed as a property with the DRM_MODE_PROP_ATOMIC flag set. + */ +struct drm_property { + /** + * @head: per-device list of properties, for cleanup. + */ + struct list_head head; + + /** + * @base: base KMS object + */ + struct drm_mode_object base; + + /** + * @flags: + * + * Property flags and type. A property needs to be one of the following + * types: + * + * DRM_MODE_PROP_RANGE + * Range properties report their minimum and maximum admissible unsigned values. + * The KMS core verifies that values set by application fit in that + * range. The range is unsigned. Range properties are created using + * drm_property_create_range(). + * + * DRM_MODE_PROP_SIGNED_RANGE + * Range properties report their minimum and maximum admissible unsigned values. + * The KMS core verifies that values set by application fit in that + * range. The range is signed. Range properties are created using + * drm_property_create_signed_range(). + * + * DRM_MODE_PROP_ENUM + * Enumerated properties take a numerical value that ranges from 0 to + * the number of enumerated values defined by the property minus one, + * and associate a free-formed string name to each value. 
Applications + * can retrieve the list of defined value-name pairs and use the + * numerical value to get and set property instance values. Enum + * properties are created using drm_property_create_enum(). + * + * DRM_MODE_PROP_BITMASK + * Bitmask properties are enumeration properties that additionally + * restrict all enumerated values to the 0..63 range. Bitmask property + * instance values combine one or more of the enumerated bits defined + * by the property. Bitmask properties are created using + * drm_property_create_bitmask(). + * + * DRM_MODE_PROP_OBJECT + * Object properties are used to link modeset objects. This is used + * extensively in the atomic support to create the display pipeline, + * by linking &drm_framebuffer to &drm_plane, &drm_plane to + * &drm_crtc and &drm_connector to &drm_crtc. An object property can + * only link to a specific type of &drm_mode_object, this limit is + * enforced by the core. Object properties are created using + * drm_property_create_object(). + * + * Object properties work like blob properties, but in a more + * general fashion. They are limited to atomic drivers and must have + * the DRM_MODE_PROP_ATOMIC flag set. + * + * DRM_MODE_PROP_BLOB + * Blob properties store a binary blob without any format restriction. + * The binary blobs are created as KMS standalone objects, and blob + * property instance values store the ID of their associated blob + * object. Blob properties are created by calling + * drm_property_create() with DRM_MODE_PROP_BLOB as the type. + * + * Actual blob objects to contain blob data are created using + * drm_property_create_blob(), or through the corresponding IOCTL. + * + * Besides the built-in limit to only accept blob objects blob + * properties work exactly like object properties. The only reasons + * blob properties exist is backwards compatibility with existing + * userspace. + * + * In addition a property can have any combination of the below flags: + * + * DRM_MODE_PROP_ATOMIC + * Set for properties which encode atomic modeset state. Such + * properties are not exposed to legacy userspace. + * + * DRM_MODE_PROP_IMMUTABLE + * Set for properties whose values cannot be changed by + * userspace. The kernel is allowed to update the value of these + * properties. This is generally used to expose probe state to + * userspace, e.g. the EDID, or the connector path property on DP + * MST sinks. Kernel can update the value of an immutable property + * by calling drm_object_property_set_value(). + */ + uint32_t flags; + + /** + * @name: symbolic name of the properties + */ + char name[DRM_PROP_NAME_LEN]; + + /** + * @num_values: size of the @values array. + */ + uint32_t num_values; + + /** + * @values: + * + * Array with limits and values for the property. The + * interpretation of these limits is dependent upon the type per @flags. + */ + uint64_t *values; + + /** + * @dev: DRM device + */ + struct drm_device *dev; + + /** + * @enum_list: + * + * List of &drm_prop_enum_list structures with the symbolic names for + * enum and bitmask values. + */ + struct list_head enum_list; +}; + +/** + * struct drm_property_blob - Blob data for &drm_property + * @base: base KMS object + * @dev: DRM device + * @head_global: entry on the global blob list in + * &drm_mode_config.property_blob_list. + * @head_file: entry on the per-file blob list in &drm_file.blobs list. 
+ * @length: size of the blob in bytes, invariant over the lifetime of the object + * @data: actual data, embedded at the end of this structure + * + * Blobs are used to store bigger values than what fits directly into the 64 + * bits available for a &drm_property. + * + * Blobs are reference counted using drm_property_blob_get() and + * drm_property_blob_put(). They are created using drm_property_create_blob(). + */ +struct drm_property_blob { + struct drm_mode_object base; + struct drm_device *dev; + struct list_head head_global; + struct list_head head_file; + size_t length; + void *data; +}; + +struct drm_prop_enum_list { + int type; + const char *name; +}; + +#define obj_to_property(x) container_of(x, struct drm_property, base) +#define obj_to_blob(x) container_of(x, struct drm_property_blob, base) + +/** + * drm_property_type_is - check the type of a property + * @property: property to check + * @type: property type to compare with + * + * This is a helper function becauase the uapi encoding of property types is + * a bit special for historical reasons. + */ +static inline bool drm_property_type_is(struct drm_property *property, + uint32_t type) +{ + /* instanceof for props.. handles extended type vs original types: */ + if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) + return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type; + return property->flags & type; +} + +struct drm_property *drm_property_create(struct drm_device *dev, + u32 flags, const char *name, + int num_values); +struct drm_property *drm_property_create_enum(struct drm_device *dev, + u32 flags, const char *name, + const struct drm_prop_enum_list *props, + int num_values); +struct drm_property *drm_property_create_bitmask(struct drm_device *dev, + u32 flags, const char *name, + const struct drm_prop_enum_list *props, + int num_props, + uint64_t supported_bits); +struct drm_property *drm_property_create_range(struct drm_device *dev, + u32 flags, const char *name, + uint64_t min, uint64_t max); +struct drm_property *drm_property_create_signed_range(struct drm_device *dev, + u32 flags, const char *name, + int64_t min, int64_t max); +struct drm_property *drm_property_create_object(struct drm_device *dev, + u32 flags, const char *name, + uint32_t type); +struct drm_property *drm_property_create_bool(struct drm_device *dev, + u32 flags, const char *name); +int drm_property_add_enum(struct drm_property *property, + uint64_t value, const char *name); +void drm_property_destroy(struct drm_device *dev, struct drm_property *property); + +struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, + size_t length, + const void *data); +struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev, + uint32_t id); +int drm_property_replace_global_blob(struct drm_device *dev, + struct drm_property_blob **replace, + size_t length, + const void *data, + struct drm_mode_object *obj_holds_id, + struct drm_property *prop_holds_id); +bool drm_property_replace_blob(struct drm_property_blob **blob, + struct drm_property_blob *new_blob); +struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob); +void drm_property_blob_put(struct drm_property_blob *blob); + +/** + * drm_property_find - find property object + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: property object id + * + * This function looks up the property object specified by id and returns it. 
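+ *
+ * A hypothetical lookup sketch (illustrative only; ``prop_id`` and the
+ * error handling are assumptions, not part of this header)::
+ *
+ *	struct drm_property *prop;
+ *
+ *	prop = drm_property_find(dev, file_priv, prop_id);
+ *	if (!prop)
+ *		return -ENOENT;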
+ */ +static inline struct drm_property *drm_property_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PROPERTY); + return mo ? obj_to_property(mo) : NULL; +} + +#endif diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h new file mode 100644 index 000000000..e8d94fca2 --- /dev/null +++ b/include/drm/drm_rect.h @@ -0,0 +1,259 @@ +/* + * Copyright (C) 2011-2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_RECT_H +#define DRM_RECT_H + +#include <linux/types.h> + +/** + * DOC: rect utils + * + * Utility functions to help manage rectangular areas for + * clipping, scaling, etc. calculations. + */ + +/** + * struct drm_rect - two dimensional rectangle + * @x1: horizontal starting coordinate (inclusive) + * @x2: horizontal ending coordinate (exclusive) + * @y1: vertical starting coordinate (inclusive) + * @y2: vertical ending coordinate (exclusive) + * + * Note that this must match the layout of struct drm_mode_rect or the damage + * helpers like drm_atomic_helper_damage_iter_init() break. + */ +struct drm_rect { + int x1, y1, x2, y2; +}; + +/** + * DRM_RECT_INIT - initialize a rectangle from x/y/w/h + * @x: x coordinate + * @y: y coordinate + * @w: width + * @h: height + * + * RETURNS: + * A new rectangle of the specified size. + */ +#define DRM_RECT_INIT(x, y, w, h) ((struct drm_rect){ \ + .x1 = (x), \ + .y1 = (y), \ + .x2 = (x) + (w), \ + .y2 = (y) + (h) }) + +/** + * DRM_RECT_FMT - printf string for &struct drm_rect + */ +#define DRM_RECT_FMT "%dx%d%+d%+d" +/** + * DRM_RECT_ARG - printf arguments for &struct drm_rect + * @r: rectangle struct + */ +#define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1 + +/** + * DRM_RECT_FP_FMT - printf string for &struct drm_rect in 16.16 fixed point + */ +#define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u" +/** + * DRM_RECT_FP_ARG - printf arguments for &struct drm_rect in 16.16 fixed point + * @r: rectangle struct + * + * This is useful for e.g. printing plane source rectangles, which are in 16.16 + * fixed point. 
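+ *
+ * A hypothetical debug print (illustrative only; ``dev`` and ``src`` are
+ * assumed to be a &drm_device and a 16.16 fixed point &struct drm_rect
+ * already in scope)::
+ *
+ *	drm_dbg_kms(dev, "src: " DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));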
+ */ +#define DRM_RECT_FP_ARG(r) \ + drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \ + drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \ + (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \ + (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10 + +/** + * drm_rect_init - initialize the rectangle from x/y/w/h + * @r: rectangle + * @x: x coordinate + * @y: y coordinate + * @width: width + * @height: height + */ +static inline void drm_rect_init(struct drm_rect *r, int x, int y, + int width, int height) +{ + r->x1 = x; + r->y1 = y; + r->x2 = x + width; + r->y2 = y + height; +} + +/** + * drm_rect_adjust_size - adjust the size of the rectangle + * @r: rectangle to be adjusted + * @dw: horizontal adjustment + * @dh: vertical adjustment + * + * Change the size of rectangle @r by @dw in the horizontal direction, + * and by @dh in the vertical direction, while keeping the center + * of @r stationary. + * + * Positive @dw and @dh increase the size, negative values decrease it. + */ +static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh) +{ + r->x1 -= dw >> 1; + r->y1 -= dh >> 1; + r->x2 += (dw + 1) >> 1; + r->y2 += (dh + 1) >> 1; +} + +/** + * drm_rect_translate - translate the rectangle + * @r: rectangle to be tranlated + * @dx: horizontal translation + * @dy: vertical translation + * + * Move rectangle @r by @dx in the horizontal direction, + * and by @dy in the vertical direction. + */ +static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy) +{ + r->x1 += dx; + r->y1 += dy; + r->x2 += dx; + r->y2 += dy; +} + +/** + * drm_rect_translate_to - translate the rectangle to an absolute position + * @r: rectangle to be tranlated + * @x: horizontal position + * @y: vertical position + * + * Move rectangle @r to @x in the horizontal direction, + * and to @y in the vertical direction. + */ +static inline void drm_rect_translate_to(struct drm_rect *r, int x, int y) +{ + drm_rect_translate(r, x - r->x1, y - r->y1); +} + +/** + * drm_rect_downscale - downscale a rectangle + * @r: rectangle to be downscaled + * @horz: horizontal downscale factor + * @vert: vertical downscale factor + * + * Divide the coordinates of rectangle @r by @horz and @vert. + */ +static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert) +{ + r->x1 /= horz; + r->y1 /= vert; + r->x2 /= horz; + r->y2 /= vert; +} + +/** + * drm_rect_width - determine the rectangle width + * @r: rectangle whose width is returned + * + * RETURNS: + * The width of the rectangle. + */ +static inline int drm_rect_width(const struct drm_rect *r) +{ + return r->x2 - r->x1; +} + +/** + * drm_rect_height - determine the rectangle height + * @r: rectangle whose height is returned + * + * RETURNS: + * The height of the rectangle. + */ +static inline int drm_rect_height(const struct drm_rect *r) +{ + return r->y2 - r->y1; +} + +/** + * drm_rect_visible - determine if the rectangle is visible + * @r: rectangle whose visibility is returned + * + * RETURNS: + * %true if the rectangle is visible, %false otherwise. + */ +static inline bool drm_rect_visible(const struct drm_rect *r) +{ + return drm_rect_width(r) > 0 && drm_rect_height(r) > 0; +} + +/** + * drm_rect_equals - determine if two rectangles are equal + * @r1: first rectangle + * @r2: second rectangle + * + * RETURNS: + * %true if the rectangles are equal, %false otherwise. 
+ */ +static inline bool drm_rect_equals(const struct drm_rect *r1, + const struct drm_rect *r2) +{ + return r1->x1 == r2->x1 && r1->x2 == r2->x2 && + r1->y1 == r2->y1 && r1->y2 == r2->y2; +} + +/** + * drm_rect_fp_to_int - Convert a rect in 16.16 fixed point form to int form. + * @dst: rect to be stored the converted value + * @src: rect in 16.16 fixed point form + */ +static inline void drm_rect_fp_to_int(struct drm_rect *dst, + const struct drm_rect *src) +{ + drm_rect_init(dst, src->x1 >> 16, src->y1 >> 16, + drm_rect_width(src) >> 16, + drm_rect_height(src) >> 16); +} + +bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); +bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, + const struct drm_rect *clip); +int drm_rect_calc_hscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_hscale, int max_hscale); +int drm_rect_calc_vscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_vscale, int max_vscale); +void drm_rect_debug_print(const char *prefix, + const struct drm_rect *r, bool fixed_point); +void drm_rect_rotate(struct drm_rect *r, + int width, int height, + unsigned int rotation); +void drm_rect_rotate_inv(struct drm_rect *r, + int width, int height, + unsigned int rotation); + +#endif diff --git a/include/drm/drm_self_refresh_helper.h b/include/drm/drm_self_refresh_helper.h new file mode 100644 index 000000000..520235c20 --- /dev/null +++ b/include/drm/drm_self_refresh_helper.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2019 Google, Inc. + * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ +#ifndef DRM_SELF_REFRESH_HELPER_H_ +#define DRM_SELF_REFRESH_HELPER_H_ + +struct drm_atomic_state; +struct drm_crtc; + +void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state); +void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, + unsigned int commit_time_ms, + unsigned int new_self_refresh_mask); + +int drm_self_refresh_helper_init(struct drm_crtc *crtc); +void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc); +#endif diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h new file mode 100644 index 000000000..0b3647e61 --- /dev/null +++ b/include/drm/drm_simple_kms_helper.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2016 Noralf Trønnes + */ + +#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H +#define __LINUX_DRM_SIMPLE_KMS_HELPER_H + +#include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> +#include <drm/drm_plane.h> + +struct drm_simple_display_pipe; + +/** + * struct drm_simple_display_pipe_funcs - helper operations for a simple + * display pipeline + */ +struct drm_simple_display_pipe_funcs { + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in the + * crtc used in this simple display pipe. This should be implemented + * if the display pipe has some sort of restriction in the modes + * it can display. For example, a given display pipe may be responsible + * to set a clock value. If the clock can not produce all the values + * for the available modes then this callback can be used to restrict + * the number of modes to only the ones that can be displayed. Another + * reason can be bandwidth mitigation: the memory port on the display + * controller can have bandwidth limitations not allowing pixel data + * to be fetched at any rate. 
+ * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * This function is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardware constraints. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode); + + /** + * @enable: + * + * This function should be used to enable the pipeline. + * It is called when the underlying crtc is enabled. + * This hook is optional. + */ + void (*enable)(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state); + /** + * @disable: + * + * This function should be used to disable the pipeline. + * It is called when the underlying crtc is disabled. + * This hook is optional. + */ + void (*disable)(struct drm_simple_display_pipe *pipe); + + /** + * @check: + * + * This function is called in the check phase of an atomic update, + * specifically when the underlying plane is checked. + * The simple display pipeline helpers already check that the plane is + * not scaled, fills the entire visible area and is always enabled + * when the crtc is also enabled. + * This hook is optional. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*check)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state, + struct drm_crtc_state *crtc_state); + /** + * @update: + * + * This function is called when the underlying plane state is updated. + * This hook is optional. + * + * This is the function drivers should submit the + * &drm_pending_vblank_event from. Using either + * drm_crtc_arm_vblank_event(), when the driver supports vblank + * interrupt handling, or drm_crtc_send_vblank_event() for more + * complex case. In case the hardware lacks vblank support entirely, + * drivers can set &struct drm_crtc_state.no_vblank in + * &struct drm_simple_display_pipe_funcs.check and let DRM's + * atomic helper fake a vblank event. + */ + void (*update)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_plane_state); + + /** + * @prepare_fb: + * + * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read + * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for + * more details. + * + * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook + * set drm_gem_simple_display_pipe_prepare_fb() is called automatically + * to implement this. Other drivers which need additional plane + * processing can call drm_gem_simple_display_pipe_prepare_fb() from + * their @prepare_fb hook. + */ + int (*prepare_fb)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + + /** + * @cleanup_fb: + * + * Optional, called by &drm_plane_helper_funcs.cleanup_fb. Please read + * the documentation for the &drm_plane_helper_funcs.cleanup_fb hook for + * more details. 
+ */ + void (*cleanup_fb)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + + /** + * @enable_vblank: + * + * Optional, called by &drm_crtc_funcs.enable_vblank. Please read + * the documentation for the &drm_crtc_funcs.enable_vblank hook for + * more details. + */ + int (*enable_vblank)(struct drm_simple_display_pipe *pipe); + + /** + * @disable_vblank: + * + * Optional, called by &drm_crtc_funcs.disable_vblank. Please read + * the documentation for the &drm_crtc_funcs.disable_vblank hook for + * more details. + */ + void (*disable_vblank)(struct drm_simple_display_pipe *pipe); + + /** + * @reset_crtc: + * + * Optional, called by &drm_crtc_funcs.reset. Please read the + * documentation for the &drm_crtc_funcs.reset hook for more details. + */ + void (*reset_crtc)(struct drm_simple_display_pipe *pipe); + + /** + * @duplicate_crtc_state: + * + * Optional, called by &drm_crtc_funcs.atomic_duplicate_state. Please + * read the documentation for the &drm_crtc_funcs.atomic_duplicate_state + * hook for more details. + */ + struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *pipe); + + /** + * @destroy_crtc_state: + * + * Optional, called by &drm_crtc_funcs.atomic_destroy_state. Please + * read the documentation for the &drm_crtc_funcs.atomic_destroy_state + * hook for more details. + */ + void (*destroy_crtc_state)(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state); + + /** + * @reset_plane: + * + * Optional, called by &drm_plane_funcs.reset. Please read the + * documentation for the &drm_plane_funcs.reset hook for more details. + */ + void (*reset_plane)(struct drm_simple_display_pipe *pipe); + + /** + * @duplicate_plane_state: + * + * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please + * read the documentation for the &drm_plane_funcs.atomic_duplicate_state + * hook for more details. + */ + struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe); + + /** + * @destroy_plane_state: + * + * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please + * read the documentation for the &drm_plane_funcs.atomic_destroy_state + * hook for more details. + */ + void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +}; + +/** + * struct drm_simple_display_pipe - simple display pipeline + * @crtc: CRTC control structure + * @plane: Plane control structure + * @encoder: Encoder control structure + * @connector: Connector control structure + * @funcs: Pipeline control functions (optional) + * + * Simple display pipeline with plane, crtc and encoder collapsed into one + * entity. It should be initialized by calling drm_simple_display_pipe_init(). 
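A hedged sketch of that initialization, with hypothetical my_* driver names and a single XRGB8888 format (DRM_FORMAT_XRGB8888 comes from <drm/drm_fourcc.h>):

  static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
          .mode_valid = my_pipe_mode_valid,
          .enable     = my_pipe_enable,
          .disable    = my_pipe_disable,
          .update     = my_pipe_update,
  };

  static const uint32_t my_formats[] = { DRM_FORMAT_XRGB8888 };

  /* In the driver's probe path, once the drm device and connector exist: */
  ret = drm_simple_display_pipe_init(drm, &my_priv->pipe, &my_pipe_funcs,
                                     my_formats, ARRAY_SIZE(my_formats),
                                     NULL, &my_priv->connector);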
+ */ +struct drm_simple_display_pipe { + struct drm_crtc crtc; + struct drm_plane plane; + struct drm_encoder encoder; + struct drm_connector *connector; + + const struct drm_simple_display_pipe_funcs *funcs; +}; + +int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, + struct drm_bridge *bridge); + +int drm_simple_display_pipe_init(struct drm_device *dev, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const uint64_t *format_modifiers, + struct drm_connector *connector); + +int drm_simple_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + int encoder_type); + +void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size, + size_t offset, int encoder_type); + +/** + * drmm_simple_encoder_alloc - Allocate and initialize an encoder with basic + * functionality. + * @dev: drm device + * @type: the type of the struct which contains struct &drm_encoder + * @member: the name of the &drm_encoder within @type. + * @encoder_type: user visible type of the encoder + * + * Allocates and initializes an encoder that has no further functionality. + * Settings for possible CRTC and clones are left to their initial values. + * Cleanup is automatically handled through registering drm_encoder_cleanup() + * with drmm_add_action(). + * + * Returns: + * Pointer to new encoder, or ERR_PTR on failure. + */ +#define drmm_simple_encoder_alloc(dev, type, member, encoder_type) \ + ((type *)__drmm_simple_encoder_alloc(dev, sizeof(type), \ + offsetof(type, member), \ + encoder_type)) + +#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */ diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h new file mode 100644 index 000000000..6cf7243a1 --- /dev/null +++ b/include/drm/drm_syncobj.h @@ -0,0 +1,132 @@ +/* + * Copyright © 2017 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * + */ +#ifndef __DRM_SYNCOBJ_H__ +#define __DRM_SYNCOBJ_H__ + +#include <linux/dma-fence.h> +#include <linux/dma-fence-chain.h> + +struct drm_file; + +/** + * struct drm_syncobj - sync object. + * + * This structure defines a generic sync object which wraps a &dma_fence. + */ +struct drm_syncobj { + /** + * @refcount: Reference count of this object. + */ + struct kref refcount; + /** + * @fence: + * NULL or a pointer to the fence bound to this object. + * + * This field should not be used directly. 
Use drm_syncobj_fence_get() + * and drm_syncobj_replace_fence() instead. + */ + struct dma_fence __rcu *fence; + /** + * @cb_list: List of callbacks to call when the &fence gets replaced. + */ + struct list_head cb_list; + /** + * @lock: Protects &cb_list and write-locks &fence. + */ + spinlock_t lock; + /** + * @file: A file backing for this syncobj. + */ + struct file *file; +}; + +void drm_syncobj_free(struct kref *kref); + +/** + * drm_syncobj_get - acquire a syncobj reference + * @obj: sync object + * + * This acquires an additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. + */ +static inline void +drm_syncobj_get(struct drm_syncobj *obj) +{ + kref_get(&obj->refcount); +} + +/** + * drm_syncobj_put - release a reference to a sync object. + * @obj: sync object. + */ +static inline void +drm_syncobj_put(struct drm_syncobj *obj) +{ + kref_put(&obj->refcount, drm_syncobj_free); +} + +/** + * drm_syncobj_fence_get - get a reference to a fence in a sync object + * @syncobj: sync object. + * + * This acquires additional reference to &drm_syncobj.fence contained in @obj, + * if not NULL. It is illegal to call this without already holding a reference. + * No locks required. + * + * Returns: + * Either the fence of @obj or NULL if there's none. + */ +static inline struct dma_fence * +drm_syncobj_fence_get(struct drm_syncobj *syncobj) +{ + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&syncobj->fence); + rcu_read_unlock(); + + return fence; +} + +struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, + u32 handle); +void drm_syncobj_add_point(struct drm_syncobj *syncobj, + struct dma_fence_chain *chain, + struct dma_fence *fence, + uint64_t point); +void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, + struct dma_fence *fence); +int drm_syncobj_find_fence(struct drm_file *file_private, + u32 handle, u64 point, u64 flags, + struct dma_fence **fence); +void drm_syncobj_free(struct kref *kref); +int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, + struct dma_fence *fence); +int drm_syncobj_get_handle(struct drm_file *file_private, + struct drm_syncobj *syncobj, u32 *handle); +int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd); + +#endif diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h new file mode 100644 index 000000000..6273cac44 --- /dev/null +++ b/include/drm/drm_sysfs.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DRM_SYSFS_H_ +#define _DRM_SYSFS_H_ + +struct drm_device; +struct device; +struct drm_connector; +struct drm_property; + +int drm_class_device_register(struct device *dev); +void drm_class_device_unregister(struct device *dev); + +void drm_sysfs_hotplug_event(struct drm_device *dev); +void drm_sysfs_connector_hotplug_event(struct drm_connector *connector); +void drm_sysfs_connector_status_event(struct drm_connector *connector, + struct drm_property *property); +#endif diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h new file mode 100644 index 000000000..79952d8c4 --- /dev/null +++ b/include/drm/drm_util.h @@ -0,0 +1,83 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 2018 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, 
merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_UTIL_H_ +#define _DRM_UTIL_H_ + +/** + * DOC: drm utils + * + * Macros and inline functions that does not naturally belong in other places + */ + +#include <linux/interrupt.h> +#include <linux/kgdb.h> +#include <linux/preempt.h> +#include <linux/smp.h> + +/* + * Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall + * only be visible for drmselftests. + */ +#if defined(CONFIG_DRM_EXPORT_FOR_TESTS) +#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x) +#else +#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) +#endif + +/** + * for_each_if - helper for handling conditionals in various for_each macros + * @condition: The condition to check + * + * Typical use:: + * + * #define for_each_foo_bar(x, y) \' + * list_for_each_entry(x, y->list, head) \' + * for_each_if(x->something == SOMETHING) + * + * The for_each_if() macro makes the use of for_each_foo_bar() less error + * prone. + */ +#define for_each_if(condition) if (!(condition)) {} else + +/** + * drm_can_sleep - returns true if currently okay to sleep + * + * This function shall not be used in new code. + * The check for running in atomic context may not work - see linux/preempt.h. + * + * FIXME: All users of drm_can_sleep should be removed (see todo.rst) + * + * Returns: + * False if kgdb is active, we are in atomic context or irqs are disabled. + */ +static inline bool drm_can_sleep(void) +{ + if (in_atomic() || in_dbg_master() || irqs_disabled()) + return false; + return true; +} + +#endif diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h new file mode 100644 index 000000000..70775748d --- /dev/null +++ b/include/drm/drm_utils.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Function prototypes for misc. drm utility functions. + * Specifically this file is for function prototypes for functions which + * may also be used outside of drm code (e.g. in fbdev drivers). + * + * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_UTILS_H__ +#define __DRM_UTILS_H__ + +#include <linux/types.h> + +int drm_get_panel_orientation_quirk(int width, int height); + +signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec); + +#endif diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h new file mode 100644 index 000000000..733a3e2d1 --- /dev/null +++ b/include/drm/drm_vblank.h @@ -0,0 +1,280 @@ +/* + * Copyright 2016 Intel Corp. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_VBLANK_H_ +#define _DRM_VBLANK_H_ + +#include <linux/seqlock.h> +#include <linux/idr.h> +#include <linux/poll.h> +#include <linux/kthread.h> + +#include <drm/drm_file.h> +#include <drm/drm_modes.h> + +struct drm_device; +struct drm_crtc; +struct drm_vblank_work; + +/** + * struct drm_pending_vblank_event - pending vblank event tracking + */ +struct drm_pending_vblank_event { + /** + * @base: Base structure for tracking pending DRM events. + */ + struct drm_pending_event base; + /** + * @pipe: drm_crtc_index() of the &drm_crtc this event is for. + */ + unsigned int pipe; + /** + * @sequence: frame event should be triggered at + */ + u64 sequence; + /** + * @event: Actual event which will be sent to userspace. + */ + union { + /** + * @event.base: DRM event base class. + */ + struct drm_event base; + + /** + * @event.vbl: + * + * Event payload for vblank events, requested through + * either the MODE_PAGE_FLIP or MODE_ATOMIC IOCTL. Also + * generated by the legacy WAIT_VBLANK IOCTL, but new userspace + * should use MODE_QUEUE_SEQUENCE and &event.seq instead. + */ + struct drm_event_vblank vbl; + + /** + * @event.seq: Event payload for the MODE_QUEUEU_SEQUENCE IOCTL. + */ + struct drm_event_crtc_sequence seq; + } event; +}; + +/** + * struct drm_vblank_crtc - vblank tracking for a CRTC + * + * This structure tracks the vblank state for one CRTC. + * + * Note that for historical reasons - the vblank handling code is still shared + * with legacy/non-kms drivers - this is a free-standing structure not directly + * connected to &struct drm_crtc. But all public interface functions are taking + * a &struct drm_crtc to hide this implementation detail. + */ +struct drm_vblank_crtc { + /** + * @dev: Pointer to the &drm_device. + */ + struct drm_device *dev; + /** + * @queue: Wait queue for vblank waiters. + */ + wait_queue_head_t queue; + /** + * @disable_timer: Disable timer for the delayed vblank disabling + * hysteresis logic. Vblank disabling is controlled through the + * drm_vblank_offdelay module option and the setting of the + * &drm_device.max_vblank_count value. + */ + struct timer_list disable_timer; + + /** + * @seqlock: Protect vblank count and time. + */ + seqlock_t seqlock; + + /** + * @count: + * + * Current software vblank counter. 
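The &drm_pending_vblank_event described above is normally completed from a driver's atomic flush path using the arm/send helpers declared further down; a rough sketch of that standard pattern (placement in a hypothetical atomic_flush hook):

  /* Hand the pageflip event for this commit to the vblank machinery. */
  spin_lock_irq(&crtc->dev->event_lock);
  if (crtc->state->event) {
          if (drm_crtc_vblank_get(crtc) == 0)
                  drm_crtc_arm_vblank_event(crtc, crtc->state->event);
          else
                  drm_crtc_send_vblank_event(crtc, crtc->state->event);
          crtc->state->event = NULL;
  }
  spin_unlock_irq(&crtc->dev->event_lock);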
+ * + * Note that for a given vblank counter value drm_crtc_handle_vblank() + * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() + * provide a barrier: Any writes done before calling + * drm_crtc_handle_vblank() will be visible to callers of the later + * functions, iff the vblank count is the same or a later one. + * + * IMPORTANT: This guarantee requires barriers, therefor never access + * this field directly. Use drm_crtc_vblank_count() instead. + */ + atomic64_t count; + /** + * @time: Vblank timestamp corresponding to @count. + */ + ktime_t time; + + /** + * @refcount: Number of users/waiters of the vblank interrupt. Only when + * this refcount reaches 0 can the hardware interrupt be disabled using + * @disable_timer. + */ + atomic_t refcount; + /** + * @last: Protected by &drm_device.vbl_lock, used for wraparound handling. + */ + u32 last; + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers for this crtc. This value +1 + * will result in a wrap-around of the vblank register. It is used + * by the vblank core to handle wrap-arounds. + * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. That approach is suffering from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * This is the runtime configurable per-crtc maximum set through + * drm_crtc_set_max_vblank_count(). If this is used the driver + * must leave the device wide &drm_device.max_vblank_count at zero. + * + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set. + */ + u32 max_vblank_count; + /** + * @inmodeset: Tracks whether the vblank is disabled due to a modeset. + * For legacy driver bit 2 additionally tracks whether an additional + * temporary vblank reference has been acquired to paper over the + * hardware counter resetting/jumping. KMS drivers should instead just + * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly + * save and restore the vblank count. + */ + unsigned int inmodeset; + /** + * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this + * structure. + */ + unsigned int pipe; + /** + * @framedur_ns: Frame/Field duration in ns, used by + * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by + * drm_calc_timestamping_constants(). + */ + int framedur_ns; + /** + * @linedur_ns: Line duration in ns, used by + * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by + * drm_calc_timestamping_constants(). + */ + int linedur_ns; + + /** + * @hwmode: + * + * Cache of the current hardware display mode. Only valid when @enabled + * is set. This is used by helpers like + * drm_crtc_vblank_helper_get_vblank_timestamp(). We can't just access + * the hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode, + * because that one is really hard to get from interrupt context. + */ + struct drm_display_mode hwmode; + + /** + * @enabled: Tracks the enabling state of the corresponding &drm_crtc to + * avoid double-disabling and hence corrupting saved state. Needed by + * drivers not using atomic KMS, since those might go through their CRTC + * disabling functions multiple times. + */ + bool enabled; + + /** + * @worker: The &kthread_worker used for executing vblank works. + */ + struct kthread_worker *worker; + + /** + * @pending_work: A list of scheduled &drm_vblank_work items that are + * waiting for a future vblank. 
+ */ + struct list_head pending_work; + + /** + * @work_wait_queue: The wait queue used for signaling that a + * &drm_vblank_work item has either finished executing, or was + * cancelled. + */ + wait_queue_head_t work_wait_queue; +}; + +int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); +bool drm_dev_has_vblank(const struct drm_device *dev); +u64 drm_crtc_vblank_count(struct drm_crtc *crtc); +u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, + ktime_t *vblanktime); +void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +void drm_vblank_set_event(struct drm_pending_vblank_event *e, + u64 *seq, + ktime_t *now); +bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); +bool drm_crtc_handle_vblank(struct drm_crtc *crtc); +int drm_crtc_vblank_get(struct drm_crtc *crtc); +void drm_crtc_vblank_put(struct drm_crtc *crtc); +void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); +void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); +void drm_crtc_vblank_off(struct drm_crtc *crtc); +void drm_crtc_vblank_reset(struct drm_crtc *crtc); +void drm_crtc_vblank_on(struct drm_crtc *crtc); +u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); +void drm_crtc_vblank_restore(struct drm_crtc *crtc); + +void drm_calc_timestamping_constants(struct drm_crtc *crtc, + const struct drm_display_mode *mode); +wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc); +void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, + u32 max_vblank_count); + +/* + * Helpers for struct drm_crtc_funcs + */ + +typedef bool (*drm_vblank_get_scanout_position_func)(struct drm_crtc *crtc, + bool in_vblank_irq, + int *vpos, int *hpos, + ktime_t *stime, + ktime_t *etime, + const struct drm_display_mode *mode); + +bool +drm_crtc_vblank_helper_get_vblank_timestamp_internal(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq, + drm_vblank_get_scanout_position_func get_scanout_position); +bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); + +#endif diff --git a/include/drm/drm_vblank_work.h b/include/drm/drm_vblank_work.h new file mode 100644 index 000000000..eb41d0810 --- /dev/null +++ b/include/drm/drm_vblank_work.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef _DRM_VBLANK_WORK_H_ +#define _DRM_VBLANK_WORK_H_ + +#include <linux/kthread.h> + +struct drm_crtc; + +/** + * struct drm_vblank_work - A delayed work item which delays until a target + * vblank passes, and then executes at realtime priority outside of IRQ + * context. + * + * See also: + * drm_vblank_work_schedule() + * drm_vblank_work_init() + * drm_vblank_work_cancel_sync() + * drm_vblank_work_flush() + */ +struct drm_vblank_work { + /** + * @base: The base &kthread_work item which will be executed by + * &drm_vblank_crtc.worker. Drivers should not interact with this + * directly, and instead rely on drm_vblank_work_init() to initialize + * this. + */ + struct kthread_work base; + + /** + * @vblank: A pointer to &drm_vblank_crtc this work item belongs to. + */ + struct drm_vblank_crtc *vblank; + + /** + * @count: The target vblank this work will execute on. 
Drivers should + * not modify this value directly, and instead use + * drm_vblank_work_schedule() + */ + u64 count; + + /** + * @cancelling: The number of drm_vblank_work_cancel_sync() calls that + * are currently running. A work item cannot be rescheduled until all + * calls have finished. + */ + int cancelling; + + /** + * @node: The position of this work item in + * &drm_vblank_crtc.pending_work. + */ + struct list_head node; +}; + +/** + * to_drm_vblank_work - Retrieve the respective &drm_vblank_work item from a + * &kthread_work + * @_work: The &kthread_work embedded inside a &drm_vblank_work + */ +#define to_drm_vblank_work(_work) \ + container_of((_work), struct drm_vblank_work, base) + +int drm_vblank_work_schedule(struct drm_vblank_work *work, + u64 count, bool nextonmiss); +void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc, + void (*func)(struct kthread_work *work)); +bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work); +void drm_vblank_work_flush(struct drm_vblank_work *work); + +#endif /* !_DRM_VBLANK_WORK_H_ */ diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h new file mode 100644 index 000000000..6c2a2f21d --- /dev/null +++ b/include/drm/drm_vma_manager.h @@ -0,0 +1,247 @@ +#ifndef __DRM_VMA_MANAGER_H__ +#define __DRM_VMA_MANAGER_H__ + +/* + * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <drm/drm_mm.h> +#include <linux/mm.h> +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +/* We make up offsets for buffer objects so we can recognize them at + * mmap time. 
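The vblank work interface declared above is used roughly as follows (hedged sketch, hypothetical my_* names; the function runs from the CRTC's vblank worker at realtime priority, outside IRQ context):

  static void my_unpin_work_func(struct kthread_work *base)
  {
          struct drm_vblank_work *work = to_drm_vblank_work(base);

          /* Safe to sleep here; runs after the target vblank has passed. */
  }

  drm_vblank_work_init(&my_priv->unpin_work, crtc, my_unpin_work_func);
  /* Run no earlier than one vblank after the current count: */
  drm_vblank_work_schedule(&my_priv->unpin_work,
                           drm_crtc_vblank_count(crtc) + 1, true);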
pgoff in mmap is an unsigned long, so we need to make sure + * that the faked up offset will fit + */ +#if BITS_PER_LONG == 64 +#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) +#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256) +#else +#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1) +#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16) +#endif + +struct drm_file; + +struct drm_vma_offset_file { + struct rb_node vm_rb; + struct drm_file *vm_tag; + unsigned long vm_count; +}; + +struct drm_vma_offset_node { + rwlock_t vm_lock; + struct drm_mm_node vm_node; + struct rb_root vm_files; + void *driver_private; +}; + +struct drm_vma_offset_manager { + rwlock_t vm_lock; + struct drm_mm vm_addr_space_mm; +}; + +void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr, + unsigned long page_offset, unsigned long size); +void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr); + +struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages); +int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node, unsigned long pages); +void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node); + +int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag); +int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag); +void drm_vma_node_revoke(struct drm_vma_offset_node *node, + struct drm_file *tag); +bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, + struct drm_file *tag); + +/** + * drm_vma_offset_exact_lookup_locked() - Look up node by exact address + * @mgr: Manager object + * @start: Start address (page-based, not byte-based) + * @pages: Size of object (page-based) + * + * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node. + * It only returns the exact object with the given start address. + * + * RETURNS: + * Node at exact start address @start. + */ +static inline struct drm_vma_offset_node * +drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages) +{ + struct drm_vma_offset_node *node; + + node = drm_vma_offset_lookup_locked(mgr, start, pages); + return (node && node->vm_node.start == start) ? node : NULL; +} + +/** + * drm_vma_offset_lock_lookup() - Lock lookup for extended private use + * @mgr: Manager object + * + * Lock VMA manager for extended lookups. Only locked VMA function calls + * are allowed while holding this lock. All other contexts are blocked from VMA + * until the lock is released via drm_vma_offset_unlock_lookup(). + * + * Use this if you need to take a reference to the objects returned by + * drm_vma_offset_lookup_locked() before releasing this lock again. + * + * This lock must not be used for anything else than extended lookups. You must + * not call any other VMA helpers while holding this lock. + * + * Note: You're in atomic-context while holding this lock! + */ +static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr) +{ + read_lock(&mgr->vm_lock); +} + +/** + * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use + * @mgr: Manager object + * + * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information. 
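A sketch of the locked-lookup pattern just described; the container type my_gem_object and its refcount field are hypothetical:

  drm_vma_offset_lock_lookup(mgr);
  node = drm_vma_offset_lookup_locked(mgr, vma->vm_pgoff, vma_pages(vma));
  if (node) {
          obj = container_of(node, struct my_gem_object, vma_node);
          kref_get(&obj->refcount);   /* take a reference while the lock is held */
  }
  drm_vma_offset_unlock_lookup(mgr);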
+ */ +static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr) +{ + read_unlock(&mgr->vm_lock); +} + +/** + * drm_vma_node_reset() - Initialize or reset node object + * @node: Node to initialize or reset + * + * Reset a node to its initial state. This must be called before using it with + * any VMA offset manager. + * + * This must not be called on an already allocated node, or you will leak + * memory. + */ +static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) +{ + memset(node, 0, sizeof(*node)); + node->vm_files = RB_ROOT; + rwlock_init(&node->vm_lock); +} + +/** + * drm_vma_node_start() - Return start address for page-based addressing + * @node: Node to inspect + * + * Return the start address of the given node. This can be used as offset into + * the linear VM space that is provided by the VMA offset manager. Note that + * this can only be used for page-based addressing. If you need a proper offset + * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the + * drm_vma_node_offset_addr() helper instead. + * + * RETURNS: + * Start address of @node for page-based addressing. 0 if the node does not + * have an offset allocated. + */ +static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node) +{ + return node->vm_node.start; +} + +/** + * drm_vma_node_size() - Return size (page-based) + * @node: Node to inspect + * + * Return the size as number of pages for the given node. This is the same size + * that was passed to drm_vma_offset_add(). If no offset is allocated for the + * node, this is 0. + * + * RETURNS: + * Size of @node as number of pages. 0 if the node does not have an offset + * allocated. + */ +static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node) +{ + return node->vm_node.size; +} + +/** + * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps + * @node: Linked offset node + * + * Same as drm_vma_node_start() but returns the address as a valid offset that + * can be used for user-space mappings during mmap(). + * This must not be called on unlinked nodes. + * + * RETURNS: + * Offset of @node for byte-based addressing. 0 if the node does not have an + * object allocated. + */ +static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) +{ + return ((__u64)node->vm_node.start) << PAGE_SHIFT; +} + +/** + * drm_vma_node_unmap() - Unmap offset node + * @node: Offset node + * @file_mapping: Address space to unmap @node from + * + * Unmap all userspace mappings for a given offset node. The mappings must be + * associated with the @file_mapping address-space. If no offset exists + * nothing is done. + * + * This call is unlocked. The caller must guarantee that drm_vma_offset_remove() + * is not called on this node concurrently. + */ +static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, + struct address_space *file_mapping) +{ + if (drm_mm_node_allocated(&node->vm_node)) + unmap_mapping_range(file_mapping, + drm_vma_node_offset_addr(node), + drm_vma_node_size(node) << PAGE_SHIFT, 1); +} + +/** + * drm_vma_node_verify_access() - Access verification helper for TTM + * @node: Offset node + * @tag: Tag of file to check + * + * This checks whether @tag is granted access to @node. It is the same as + * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM + * verify_access() callbacks. + * + * RETURNS: + * 0 if access is granted, -EACCES otherwise. 
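Putting the helpers above together, the usual lifetime of an offset node for a buffer object looks roughly like this (hedged sketch; obj, mgr and mapping are hypothetical driver state):

  drm_vma_node_reset(&obj->vma_node);
  ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->size >> PAGE_SHIFT);
  if (ret)
          return ret;

  /* Fake offset handed to userspace for mmap(): */
  args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);

  /* ... on object destruction: drop userspace mappings, then the offset. */
  drm_vma_node_unmap(&obj->vma_node, mapping);   /* mapping: the object's address_space */
  drm_vma_offset_remove(mgr, &obj->vma_node);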
+ */ +static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node, + struct drm_file *tag) +{ + return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES; +} + +#endif /* __DRM_VMA_MANAGER_H__ */ diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h new file mode 100644 index 000000000..17e576c80 --- /dev/null +++ b/include/drm/drm_writeback.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Brian Starkey <brian.starkey@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + */ + +#ifndef __DRM_WRITEBACK_H__ +#define __DRM_WRITEBACK_H__ +#include <drm/drm_connector.h> +#include <drm/drm_encoder.h> +#include <linux/workqueue.h> + +/** + * struct drm_writeback_connector - DRM writeback connector + */ +struct drm_writeback_connector { + /** + * @base: base drm_connector object + */ + struct drm_connector base; + + /** + * @encoder: Internal encoder used by the connector to fulfill + * the DRM framework requirements. The users of the + * @drm_writeback_connector control the behaviour of the @encoder + * by passing the @enc_funcs parameter to drm_writeback_connector_init() + * function. + * For users of drm_writeback_connector_init_with_encoder(), this field + * is not valid as the encoder is managed within their drivers. + */ + struct drm_encoder encoder; + + /** + * @pixel_formats_blob_ptr: + * + * DRM blob property data for the pixel formats list on writeback + * connectors + * See also drm_writeback_connector_init() + */ + struct drm_property_blob *pixel_formats_blob_ptr; + + /** @job_lock: Protects job_queue */ + spinlock_t job_lock; + + /** + * @job_queue: + * + * Holds a list of a connector's writeback jobs; the last item is the + * most recent. The first item may be either waiting for the hardware + * to begin writing, or currently being written. + * + * See also: drm_writeback_queue_job() and + * drm_writeback_signal_completion() + */ + struct list_head job_queue; + + /** + * @fence_context: + * + * timeline context used for fence operations. + */ + unsigned int fence_context; + /** + * @fence_lock: + * + * spinlock to protect the fences in the fence_context. + */ + spinlock_t fence_lock; + /** + * @fence_seqno: + * + * Seqno variable used as monotonic counter for the fences + * created on the connector's timeline. + */ + unsigned long fence_seqno; + /** + * @timeline_name: + * + * The name of the connector's fence timeline. + */ + char timeline_name[32]; +}; + +/** + * struct drm_writeback_job - DRM writeback job + */ +struct drm_writeback_job { + /** + * @connector: + * + * Back-pointer to the writeback connector associated with the job + */ + struct drm_writeback_connector *connector; + + /** + * @prepared: + * + * Set when the job has been prepared with drm_writeback_prepare_job() + */ + bool prepared; + + /** + * @cleanup_work: + * + * Used to allow drm_writeback_signal_completion to defer dropping the + * framebuffer reference to a workqueue + */ + struct work_struct cleanup_work; + + /** + * @list_entry: + * + * List item for the writeback connector's @job_queue + */ + struct list_head list_entry; + + /** + * @fb: + * + * Framebuffer to be written to by the writeback connector. 
Do not set + * directly, use drm_writeback_set_fb() + */ + struct drm_framebuffer *fb; + + /** + * @out_fence: + * + * Fence which will signal once the writeback has completed + */ + struct dma_fence *out_fence; + + /** + * @priv: + * + * Driver-private data + */ + void *priv; +}; + +static inline struct drm_writeback_connector * +drm_connector_to_writeback(struct drm_connector *connector) +{ + return container_of(connector, struct drm_writeback_connector, base); +} + +int drm_writeback_connector_init(struct drm_device *dev, + struct drm_writeback_connector *wb_connector, + const struct drm_connector_funcs *con_funcs, + const struct drm_encoder_helper_funcs *enc_helper_funcs, + const u32 *formats, int n_formats, + u32 possible_crtcs); + +int drm_writeback_connector_init_with_encoder(struct drm_device *dev, + struct drm_writeback_connector *wb_connector, + struct drm_encoder *enc, + const struct drm_connector_funcs *con_funcs, const u32 *formats, + int n_formats); + +int drm_writeback_set_fb(struct drm_connector_state *conn_state, + struct drm_framebuffer *fb); + +int drm_writeback_prepare_job(struct drm_writeback_job *job); + +void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector, + struct drm_connector_state *conn_state); + +void drm_writeback_cleanup_job(struct drm_writeback_job *job); + +void +drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector, + int status); + +struct dma_fence * +drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector); +#endif diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h new file mode 100644 index 000000000..228f43e8d --- /dev/null +++ b/include/drm/gma_drm.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/************************************************************************** + * Copyright (c) 2007-2011, Intel Corporation. + * All Rights Reserved. + * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. + * All Rights Reserved. + * + **************************************************************************/ + +#ifndef _GMA_DRM_H_ +#define _GMA_DRM_H_ + +#endif diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h new file mode 100644 index 000000000..17e7e3145 --- /dev/null +++ b/include/drm/gpu_scheduler.h @@ -0,0 +1,545 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
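The writeback entry points above are typically used in two halves; a rough sketch, assuming a hypothetical my_priv driver structure embedding the writeback connector:

  /* Atomic commit path: latch the writeback job for this connector state. */
  drm_writeback_queue_job(&my_priv->wb_connector, connector_state);

  /* Later, once the hardware signals that the write-out has finished: */
  drm_writeback_signal_completion(&my_priv->wb_connector, 0);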
+ * + */ + +#ifndef _DRM_GPU_SCHEDULER_H_ +#define _DRM_GPU_SCHEDULER_H_ + +#include <drm/spsc_queue.h> +#include <linux/dma-fence.h> +#include <linux/completion.h> +#include <linux/xarray.h> +#include <linux/workqueue.h> + +#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) + +/** + * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining + * + * Setting this flag on a scheduler fence prevents pipelining of jobs depending + * on this fence. In other words we always insert a full CPU round trip before + * dependen jobs are pushed to the hw queue. + */ +#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS + +struct drm_gem_object; + +struct drm_gpu_scheduler; +struct drm_sched_rq; + +/* These are often used as an (initial) index + * to an array, and as such should start at 0. + */ +enum drm_sched_priority { + DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_HIGH, + DRM_SCHED_PRIORITY_KERNEL, + + DRM_SCHED_PRIORITY_COUNT +}; + +/** + * struct drm_sched_entity - A wrapper around a job queue (typically + * attached to the DRM file_priv). + * + * Entities will emit jobs in order to their corresponding hardware + * ring, and the scheduler will alternate between entities based on + * scheduling policy. + */ +struct drm_sched_entity { + /** + * @list: + * + * Used to append this struct to the list of entities in the runqueue + * @rq under &drm_sched_rq.entities. + * + * Protected by &drm_sched_rq.lock of @rq. + */ + struct list_head list; + + /** + * @rq: + * + * Runqueue on which this entity is currently scheduled. + * + * FIXME: Locking is very unclear for this. Writers are protected by + * @rq_lock, but readers are generally lockless and seem to just race + * with not even a READ_ONCE. + */ + struct drm_sched_rq *rq; + + /** + * @sched_list: + * + * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can + * be scheduled on any scheduler on this list. + * + * This can be modified by calling drm_sched_entity_modify_sched(). + * Locking is entirely up to the driver, see the above function for more + * details. + * + * This will be set to NULL if &num_sched_list equals 1 and @rq has been + * set already. + * + * FIXME: This means priority changes through + * drm_sched_entity_set_priority() will be lost henceforth in this case. + */ + struct drm_gpu_scheduler **sched_list; + + /** + * @num_sched_list: + * + * Number of drm_gpu_schedulers in the @sched_list. + */ + unsigned int num_sched_list; + + /** + * @priority: + * + * Priority of the entity. This can be modified by calling + * drm_sched_entity_set_priority(). Protected by &rq_lock. + */ + enum drm_sched_priority priority; + + /** + * @rq_lock: + * + * Lock to modify the runqueue to which this entity belongs. + */ + spinlock_t rq_lock; + + /** + * @job_queue: the list of jobs of this entity. + */ + struct spsc_queue job_queue; + + /** + * @fence_seq: + * + * A linearly increasing seqno incremented with each new + * &drm_sched_fence which is part of the entity. + * + * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking, + * this doesn't need to be atomic. + */ + atomic_t fence_seq; + + /** + * @fence_context: + * + * A unique context for all the fences which belong to this entity. The + * &drm_sched_fence.scheduled uses the fence_context but + * &drm_sched_fence.finished uses fence_context + 1. + */ + uint64_t fence_context; + + /** + * @dependency: + * + * The dependency fence of the job which is on the top of the job queue. 
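An entity of this kind is usually created per open file and bound to one or more schedulers via drm_sched_entity_init(), declared later in this header; a minimal sketch with hypothetical my_dev/ring_sched/file_priv names:

  struct drm_gpu_scheduler *sched_list[] = { &my_dev->ring_sched };

  ret = drm_sched_entity_init(&file_priv->entity, DRM_SCHED_PRIORITY_NORMAL,
                              sched_list, ARRAY_SIZE(sched_list), NULL);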
+ */ + struct dma_fence *dependency; + + /** + * @cb: + * + * Callback for the dependency fence above. + */ + struct dma_fence_cb cb; + + /** + * @guilty: + * + * Points to entities' guilty. + */ + atomic_t *guilty; + + /** + * @last_scheduled: + * + * Points to the finished fence of the last scheduled job. Only written + * by the scheduler thread, can be accessed locklessly from + * drm_sched_job_arm() iff the queue is empty. + */ + struct dma_fence *last_scheduled; + + /** + * @last_user: last group leader pushing a job into the entity. + */ + struct task_struct *last_user; + + /** + * @stopped: + * + * Marks the enity as removed from rq and destined for + * termination. This is set by calling drm_sched_entity_flush() and by + * drm_sched_fini(). + */ + bool stopped; + + /** + * @entity_idle: + * + * Signals when entity is not in use, used to sequence entity cleanup in + * drm_sched_entity_fini(). + */ + struct completion entity_idle; +}; + +/** + * struct drm_sched_rq - queue of entities to be scheduled. + * + * @lock: to modify the entities list. + * @sched: the scheduler to which this rq belongs to. + * @entities: list of the entities to be scheduled. + * @current_entity: the entity which is to be scheduled. + * + * Run queue is a set of entities scheduling command submissions for + * one specific ring. It implements the scheduling policy that selects + * the next entity to emit commands from. + */ +struct drm_sched_rq { + spinlock_t lock; + struct drm_gpu_scheduler *sched; + struct list_head entities; + struct drm_sched_entity *current_entity; +}; + +/** + * struct drm_sched_fence - fences corresponding to the scheduling of a job. + */ +struct drm_sched_fence { + /** + * @scheduled: this fence is what will be signaled by the scheduler + * when the job is scheduled. + */ + struct dma_fence scheduled; + + /** + * @finished: this fence is what will be signaled by the scheduler + * when the job is completed. + * + * When setting up an out fence for the job, you should use + * this, since it's available immediately upon + * drm_sched_job_init(), and the fence returned by the driver + * from run_job() won't be created until the dependencies have + * resolved. + */ + struct dma_fence finished; + + /** + * @parent: the fence returned by &drm_sched_backend_ops.run_job + * when scheduling the job on hardware. We signal the + * &drm_sched_fence.finished fence once parent is signalled. + */ + struct dma_fence *parent; + /** + * @sched: the scheduler instance to which the job having this struct + * belongs to. + */ + struct drm_gpu_scheduler *sched; + /** + * @lock: the lock used by the scheduled and the finished fences. + */ + spinlock_t lock; + /** + * @owner: job owner for debugging + */ + void *owner; +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); + +/** + * struct drm_sched_job - A job to be run by an entity. + * + * @queue_node: used to append this struct to the queue of jobs in an entity. + * @list: a job participates in a "pending" and "done" lists. + * @sched: the scheduler instance on which this job is scheduled. + * @s_fence: contains the fences for the scheduling of job. + * @finish_cb: the callback for the finished fence. + * @work: Helper to reschdeule job kill to different context. + * @id: a unique id assigned to each job scheduled on the scheduler. + * @karma: increment on every hang caused by this job. If this exceeds the hang + * limit of the scheduler then the job is marked guilty and will not + * be scheduled further. 
+ * @s_priority: the priority of the job. + * @entity: the entity to which this job belongs. + * @cb: the callback for the parent fence in s_fence. + * + * A job is created by the driver using drm_sched_job_init(), and + * should call drm_sched_entity_push_job() once it wants the scheduler + * to schedule the job. + */ +struct drm_sched_job { + struct spsc_node queue_node; + struct list_head list; + struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + + /* + * work is used only after finish_cb has been used and will not be + * accessed anymore. + */ + union { + struct dma_fence_cb finish_cb; + struct work_struct work; + }; + + uint64_t id; + atomic_t karma; + enum drm_sched_priority s_priority; + struct drm_sched_entity *entity; + struct dma_fence_cb cb; + /** + * @dependencies: + * + * Contains the dependencies as struct dma_fence for this job, see + * drm_sched_job_add_dependency() and + * drm_sched_job_add_implicit_dependencies(). + */ + struct xarray dependencies; + + /** @last_dependency: tracks @dependencies as they signal */ + unsigned long last_dependency; +}; + +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return s_job && atomic_inc_return(&s_job->karma) > threshold; +} + +enum drm_gpu_sched_stat { + DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */ + DRM_GPU_SCHED_STAT_NOMINAL, + DRM_GPU_SCHED_STAT_ENODEV, +}; + +/** + * struct drm_sched_backend_ops - Define the backend operations + * called by the scheduler + * + * These functions should be implemented in the driver side. + */ +struct drm_sched_backend_ops { + /** + * @dependency: + * + * Called when the scheduler is considering scheduling this job next, to + * get another struct dma_fence for this job to block on. Once it + * returns NULL, run_job() may be called. + * + * If a driver exclusively uses drm_sched_job_add_dependency() and + * drm_sched_job_add_implicit_dependencies() this can be ommitted and + * left as NULL. + */ + struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity); + + /** + * @run_job: Called to execute the job once all of the dependencies + * have been resolved. This may be called multiple times, if + * timedout_job() has happened and drm_sched_job_recovery() + * decides to try it again. + */ + struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); + + /** + * @timedout_job: Called when a job has taken too long to execute, + * to trigger GPU recovery. + * + * This method is called in a workqueue context. + * + * Drivers typically issue a reset to recover from GPU hangs, and this + * procedure usually follows the following workflow: + * + * 1. Stop the scheduler using drm_sched_stop(). This will park the + * scheduler thread and cancel the timeout work, guaranteeing that + * nothing is queued while we reset the hardware queue + * 2. Try to gracefully stop non-faulty jobs (optional) + * 3. Issue a GPU reset (driver-specific) + * 4. Re-submit jobs using drm_sched_resubmit_jobs() + * 5. Restart the scheduler using drm_sched_start(). At that point, new + * jobs can be queued, and the scheduler thread is unblocked + * + * Note that some GPUs have distinct hardware queues but need to reset + * the GPU globally, which requires extra synchronization between the + * timeout handler of the different &drm_gpu_scheduler. 
One way to + * achieve this synchronization is to create an ordered workqueue + * (using alloc_ordered_workqueue()) at the driver level, and pass this + * queue to drm_sched_init(), to guarantee that timeout handlers are + * executed sequentially. The above workflow needs to be slightly + * adjusted in that case: + * + * 1. Stop all schedulers impacted by the reset using drm_sched_stop() + * 2. Try to gracefully stop non-faulty jobs on all queues impacted by + * the reset (optional) + * 3. Issue a GPU reset on all faulty queues (driver-specific) + * 4. Re-submit jobs on all schedulers impacted by the reset using + * drm_sched_resubmit_jobs() + * 5. Restart all schedulers that were stopped in step #1 using + * drm_sched_start() + * + * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal, + * and the underlying driver has started or completed recovery. + * + * Return DRM_GPU_SCHED_STAT_ENODEV if the device is no longer + * available, i.e. has been unplugged. + */ + enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job); + + /** + * @free_job: Called once the job's finished fence has been signaled + * and it's time to clean it up. + */ + void (*free_job)(struct drm_sched_job *sched_job); +}; + +/** + * struct drm_gpu_scheduler - scheduler instance-specific data + * + * @ops: backend operations provided by the driver. + * @hw_submission_limit: the max size of the hardware queue. + * @timeout: the time after which a job is removed from the scheduler. + * @name: name of the ring for which this scheduler is being used. + * @sched_rq: priority-wise array of run queues. + * @wake_up_worker: the wait queue on which the scheduler sleeps until a job + * is ready to be scheduled. + * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler + * waits on this wait queue until all the scheduled jobs are + * finished. + * @hw_rq_count: the number of jobs currently in the hardware queue. + * @job_id_count: used to assign a unique id to each job. + * @timeout_wq: workqueue used to queue @work_tdr + * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the + * timeout interval is over. + * @thread: the kthread on which the scheduler runs. + * @pending_list: the list of jobs which are currently in the job queue. + * @job_list_lock: lock to protect the pending_list. + * @hang_limit: once the number of hangs caused by a job crosses this limit then it is marked + * guilty and it will no longer be considered for scheduling. + * @score: score to help the load balancer pick an idle sched + * @_score: score used when the driver doesn't provide one + * @ready: marks if the underlying HW is ready to work + * @free_guilty: A hint to the timeout handler to free the guilty job. + * @dev: system &struct device + * + * One scheduler is implemented for each hardware ring. 
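A condensed sketch of the single-queue recovery sequence described above, using the entry points declared below; my_hw_reset() stands in for the driver-specific reset:

  static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
  {
          struct drm_gpu_scheduler *sched = bad->sched;

          drm_sched_stop(sched, bad);        /* 1. park the scheduler thread  */
          drm_sched_increase_karma(bad);     /*    mark the offending job     */
          my_hw_reset(sched);                /* 3. driver-specific GPU reset  */
          drm_sched_resubmit_jobs(sched);    /* 4. re-submit pending jobs     */
          drm_sched_start(sched, true);      /* 5. unblock the scheduler      */

          return DRM_GPU_SCHED_STAT_NOMINAL;
  }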
+ */ +struct drm_gpu_scheduler { + const struct drm_sched_backend_ops *ops; + uint32_t hw_submission_limit; + long timeout; + const char *name; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT]; + wait_queue_head_t wake_up_worker; + wait_queue_head_t job_scheduled; + atomic_t hw_rq_count; + atomic64_t job_id_count; + struct workqueue_struct *timeout_wq; + struct delayed_work work_tdr; + struct task_struct *thread; + struct list_head pending_list; + spinlock_t job_list_lock; + int hang_limit; + atomic_t *score; + atomic_t _score; + bool ready; + bool free_guilty; + struct device *dev; +}; + +int drm_sched_init(struct drm_gpu_scheduler *sched, + const struct drm_sched_backend_ops *ops, + uint32_t hw_submission, unsigned hang_limit, + long timeout, struct workqueue_struct *timeout_wq, + atomic_t *score, const char *name, struct device *dev); + +void drm_sched_fini(struct drm_gpu_scheduler *sched); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_job_arm(struct drm_sched_job *job); +int drm_sched_job_add_dependency(struct drm_sched_job *job, + struct dma_fence *fence); +int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, + struct drm_gem_object *obj, + bool write); + + +void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, + struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list); + +void drm_sched_job_cleanup(struct drm_sched_job *job); +void drm_sched_wakeup(struct drm_gpu_scheduler *sched); +void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); +void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery); +void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); +void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max); +void drm_sched_increase_karma(struct drm_sched_job *bad); +void drm_sched_reset_karma(struct drm_sched_job *bad); +void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_fault(struct drm_gpu_scheduler *sched); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +void drm_sched_rq_add_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); + +int drm_sched_entity_init(struct drm_sched_entity *entity, + enum drm_sched_priority priority, + struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list, + atomic_t *guilty); +long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); +void drm_sched_entity_fini(struct drm_sched_entity *entity); +void drm_sched_entity_destroy(struct drm_sched_entity *entity); +void drm_sched_entity_select_rq(struct drm_sched_entity *entity); +struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job); +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority); +bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); + +struct drm_sched_fence *drm_sched_fence_alloc( + struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_init(struct drm_sched_fence *fence, + struct drm_sched_entity *entity); +void drm_sched_fence_free(struct drm_sched_fence *fence); + +void drm_sched_fence_scheduled(struct drm_sched_fence *fence); +void 
drm_sched_fence_finished(struct drm_sched_fence *fence); + +unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched); +void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, + unsigned long remaining); +struct drm_gpu_scheduler * +drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list); + +#endif diff --git a/include/drm/gud.h b/include/drm/gud.h new file mode 100644 index 000000000..c52a8ba4a --- /dev/null +++ b/include/drm/gud.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2020 Noralf Trønnes + */ + +#ifndef __LINUX_GUD_H +#define __LINUX_GUD_H + +#include <linux/types.h> + +/* + * struct gud_display_descriptor_req - Display descriptor + * @magic: Magic value GUD_DISPLAY_MAGIC + * @version: Protocol version + * @flags: Flags + * - STATUS_ON_SET: Always do a status request after a SET request. + * This is used by the Linux gadget driver since it has + * no way to control the status stage of a control OUT + * request that has a payload. + * - FULL_UPDATE: Always send the entire framebuffer when flushing changes. + * The GUD_REQ_SET_BUFFER request will not be sent + * before each bulk transfer, it will only be sent if the + * previous bulk transfer had failed. This gives the device + * a chance to reset its state machine if needed. + * This flag can not be used in combination with compression. + * @compression: Supported compression types + * - GUD_COMPRESSION_LZ4: LZ4 lossless compression. + * @max_buffer_size: Maximum buffer size the device can handle (optional). + * This is useful for devices that don't have a big enough + * buffer to decompress the entire framebuffer in one go. + * @min_width: Minimum pixel width the controller can handle + * @max_width: Maximum width + * @min_height: Minimum height + * @max_height: Maximum height + * + * Devices that have only one display mode will have min_width == max_width + * and min_height == max_height. + */ +struct gud_display_descriptor_req { + __le32 magic; +#define GUD_DISPLAY_MAGIC 0x1d50614d + __u8 version; + __le32 flags; +#define GUD_DISPLAY_FLAG_STATUS_ON_SET BIT(0) +#define GUD_DISPLAY_FLAG_FULL_UPDATE BIT(1) + __u8 compression; +#define GUD_COMPRESSION_LZ4 BIT(0) + __le32 max_buffer_size; + __le32 min_width; + __le32 max_width; + __le32 min_height; + __le32 max_height; +} __packed; + +/* + * struct gud_property_req - Property + * @prop: Property + * @val: Value + */ +struct gud_property_req { + __le16 prop; + __le64 val; +} __packed; + +/* + * struct gud_display_mode_req - Display mode + * @clock: Pixel clock in kHz + * @hdisplay: Horizontal display size + * @hsync_start: Horizontal sync start + * @hsync_end: Horizontal sync end + * @htotal: Horizontal total size + * @vdisplay: Vertical display size + * @vsync_start: Vertical sync start + * @vsync_end: Vertical sync end + * @vtotal: Vertical total size + * @flags: Bits 0-13 are the same as in the RandR protocol and also what DRM uses. + * The deprecated bits are reused for internal protocol flags leaving us + * free to follow DRM for the other bits in the future. + * - FLAG_PREFERRED: Set on the preferred display mode. 
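The scheduler's submission entry points declared earlier (drm_sched_job_init(), drm_sched_job_arm(), drm_sched_entity_push_job()) combine roughly as in this hedged sketch, where my_job is a hypothetical driver job embedding a &drm_sched_job as its base member:

  ret = drm_sched_job_init(&my_job->base, &file_priv->entity, file_priv);
  if (ret)
          return ret;

  ret = drm_sched_job_add_implicit_dependencies(&my_job->base, gem_obj, true);
  if (ret) {
          drm_sched_job_cleanup(&my_job->base);
          return ret;
  }

  drm_sched_job_arm(&my_job->base);
  /* The finished fence is available right after arming and can serve as out-fence. */
  out_fence = dma_fence_get(&my_job->base.s_fence->finished);
  drm_sched_entity_push_job(&my_job->base);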
+ */ +struct gud_display_mode_req { + __le32 clock; + __le16 hdisplay; + __le16 hsync_start; + __le16 hsync_end; + __le16 htotal; + __le16 vdisplay; + __le16 vsync_start; + __le16 vsync_end; + __le16 vtotal; + __le32 flags; +#define GUD_DISPLAY_MODE_FLAG_PHSYNC BIT(0) +#define GUD_DISPLAY_MODE_FLAG_NHSYNC BIT(1) +#define GUD_DISPLAY_MODE_FLAG_PVSYNC BIT(2) +#define GUD_DISPLAY_MODE_FLAG_NVSYNC BIT(3) +#define GUD_DISPLAY_MODE_FLAG_INTERLACE BIT(4) +#define GUD_DISPLAY_MODE_FLAG_DBLSCAN BIT(5) +#define GUD_DISPLAY_MODE_FLAG_CSYNC BIT(6) +#define GUD_DISPLAY_MODE_FLAG_PCSYNC BIT(7) +#define GUD_DISPLAY_MODE_FLAG_NCSYNC BIT(8) +#define GUD_DISPLAY_MODE_FLAG_HSKEW BIT(9) +/* BCast and PixelMultiplex are deprecated */ +#define GUD_DISPLAY_MODE_FLAG_DBLCLK BIT(12) +#define GUD_DISPLAY_MODE_FLAG_CLKDIV2 BIT(13) +#define GUD_DISPLAY_MODE_FLAG_USER_MASK \ + (GUD_DISPLAY_MODE_FLAG_PHSYNC | GUD_DISPLAY_MODE_FLAG_NHSYNC | \ + GUD_DISPLAY_MODE_FLAG_PVSYNC | GUD_DISPLAY_MODE_FLAG_NVSYNC | \ + GUD_DISPLAY_MODE_FLAG_INTERLACE | GUD_DISPLAY_MODE_FLAG_DBLSCAN | \ + GUD_DISPLAY_MODE_FLAG_CSYNC | GUD_DISPLAY_MODE_FLAG_PCSYNC | \ + GUD_DISPLAY_MODE_FLAG_NCSYNC | GUD_DISPLAY_MODE_FLAG_HSKEW | \ + GUD_DISPLAY_MODE_FLAG_DBLCLK | GUD_DISPLAY_MODE_FLAG_CLKDIV2) +/* Internal protocol flags */ +#define GUD_DISPLAY_MODE_FLAG_PREFERRED BIT(10) +} __packed; + +/* + * struct gud_connector_descriptor_req - Connector descriptor + * @connector_type: Connector type (GUD_CONNECTOR_TYPE_*). + * If the host doesn't support the type it should fall back to PANEL. + * @flags: Flags + * - POLL_STATUS: Connector status can change (polled every 10 seconds) + * - INTERLACE: Interlaced modes are supported + * - DOUBLESCAN: Doublescan modes are supported + */ +struct gud_connector_descriptor_req { + __u8 connector_type; +#define GUD_CONNECTOR_TYPE_PANEL 0 +#define GUD_CONNECTOR_TYPE_VGA 1 +#define GUD_CONNECTOR_TYPE_COMPOSITE 2 +#define GUD_CONNECTOR_TYPE_SVIDEO 3 +#define GUD_CONNECTOR_TYPE_COMPONENT 4 +#define GUD_CONNECTOR_TYPE_DVI 5 +#define GUD_CONNECTOR_TYPE_DISPLAYPORT 6 +#define GUD_CONNECTOR_TYPE_HDMI 7 + __le32 flags; +#define GUD_CONNECTOR_FLAGS_POLL_STATUS BIT(0) +#define GUD_CONNECTOR_FLAGS_INTERLACE BIT(1) +#define GUD_CONNECTOR_FLAGS_DOUBLESCAN BIT(2) +} __packed; + +/* + * struct gud_set_buffer_req - Set buffer transfer info + * @x: X position of rectangle + * @y: Y position + * @width: Pixel width of rectangle + * @height: Pixel height + * @length: Buffer length in bytes + * @compression: Transfer compression + * @compressed_length: Compressed buffer length + * + * This request is issued right before the bulk transfer. + * @x, @y, @width and @height specifies the rectangle where the buffer should be + * placed inside the framebuffer. + */ +struct gud_set_buffer_req { + __le32 x; + __le32 y; + __le32 width; + __le32 height; + __le32 length; + __u8 compression; + __le32 compressed_length; +} __packed; + +/* + * struct gud_state_req - Display state + * @mode: Display mode + * @format: Pixel format GUD_PIXEL_FORMAT_* + * @connector: Connector index + * @properties: Array of properties + * + * The entire state is transferred each time there's a change. 
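+ *
+ * A host therefore typically sends the complete new state with
+ * GUD_REQ_SET_STATE_CHECK and, once the device reports success, applies it
+ * with GUD_REQ_SET_STATE_COMMIT (both requests are defined further down in
+ * this header).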
+ */
+struct gud_state_req {
+	struct gud_display_mode_req mode;
+	__u8 format;
+	__u8 connector;
+	struct gud_property_req properties[];
+} __packed;
+
+/* List of supported connector properties: */
+
+/* Margins in pixels to deal with overscan, range 0-100 */
+#define GUD_PROPERTY_TV_LEFT_MARGIN			1
+#define GUD_PROPERTY_TV_RIGHT_MARGIN			2
+#define GUD_PROPERTY_TV_TOP_MARGIN			3
+#define GUD_PROPERTY_TV_BOTTOM_MARGIN			4
+#define GUD_PROPERTY_TV_MODE				5
+/* Brightness in percent, range 0-100 */
+#define GUD_PROPERTY_TV_BRIGHTNESS			6
+/* Contrast in percent, range 0-100 */
+#define GUD_PROPERTY_TV_CONTRAST			7
+/* Flicker reduction in percent, range 0-100 */
+#define GUD_PROPERTY_TV_FLICKER_REDUCTION		8
+/* Overscan in percent, range 0-100 */
+#define GUD_PROPERTY_TV_OVERSCAN			9
+/* Saturation in percent, range 0-100 */
+#define GUD_PROPERTY_TV_SATURATION			10
+/* Hue in percent, range 0-100 */
+#define GUD_PROPERTY_TV_HUE				11
+
+/*
+ * Backlight brightness is in the range 0-100 inclusive. The value represents the human perceptual
+ * brightness and not a linear PWM value. 0 is minimum brightness which should not turn the
+ * backlight completely off. The DPMS connector property should be used to control power which will
+ * trigger a GUD_REQ_SET_DISPLAY_ENABLE request.
+ *
+ * This does not map to a DRM property, it is used with the backlight device.
+ */
+#define GUD_PROPERTY_BACKLIGHT_BRIGHTNESS		12
+
+/* List of supported properties that are not connector properties: */
+
+/*
+ * Plane rotation. Should return the supported bitmask on
+ * GUD_REQ_GET_PROPERTIES. GUD_ROTATION_0 is mandatory.
+ *
+ * Note: This is not display rotation so 90/270 will need scaling to make it fit (unless squared).
+ */
+#define GUD_PROPERTY_ROTATION				50
+  #define GUD_ROTATION_0			BIT(0)
+  #define GUD_ROTATION_90			BIT(1)
+  #define GUD_ROTATION_180			BIT(2)
+  #define GUD_ROTATION_270			BIT(3)
+  #define GUD_ROTATION_REFLECT_X		BIT(4)
+  #define GUD_ROTATION_REFLECT_Y		BIT(5)
+  #define GUD_ROTATION_MASK			(GUD_ROTATION_0 | GUD_ROTATION_90 | \
+						 GUD_ROTATION_180 | GUD_ROTATION_270 | \
+						 GUD_ROTATION_REFLECT_X | GUD_ROTATION_REFLECT_Y)
+
+/* USB Control requests: */
+
+/* Get status from the last GET/SET control request. Value is u8. */
+#define GUD_REQ_GET_STATUS				0x00
+  /* Status values: */
+  #define GUD_STATUS_OK				0x00
+  #define GUD_STATUS_BUSY			0x01
+  #define GUD_STATUS_REQUEST_NOT_SUPPORTED	0x02
+  #define GUD_STATUS_PROTOCOL_ERROR		0x03
+  #define GUD_STATUS_INVALID_PARAMETER		0x04
+  #define GUD_STATUS_ERROR			0x05
+
+/* Get display descriptor as a &gud_display_descriptor_req */
+#define GUD_REQ_GET_DESCRIPTOR				0x01
+
+/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
+#define GUD_REQ_GET_FORMATS				0x40
+  #define GUD_FORMATS_MAX_NUM			32
+  #define GUD_PIXEL_FORMAT_R1			0x01 /* 1-bit monochrome */
+  #define GUD_PIXEL_FORMAT_R8			0x08 /* 8-bit greyscale */
+  #define GUD_PIXEL_FORMAT_XRGB1111		0x20
+  #define GUD_PIXEL_FORMAT_RGB332		0x30
+  #define GUD_PIXEL_FORMAT_RGB565		0x40
+  #define GUD_PIXEL_FORMAT_RGB888		0x50
+  #define GUD_PIXEL_FORMAT_XRGB8888		0x80
+  #define GUD_PIXEL_FORMAT_ARGB8888		0x81
+
+/*
+ * Get supported properties that are not connector properties as a &gud_property_req array.
+ * gud_property_req.val often contains the initial value for the property.
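+ *
+ * As an illustration (assumed values, not mandated by the protocol), a device
+ * that only supports 0 and 180 degree plane rotation could return the single
+ * entry:
+ *
+ *	struct gud_property_req prop = {
+ *		.prop = cpu_to_le16(GUD_PROPERTY_ROTATION),
+ *		.val  = cpu_to_le64(GUD_ROTATION_0 | GUD_ROTATION_180),
+ *	};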
+ */ +#define GUD_REQ_GET_PROPERTIES 0x41 + #define GUD_PROPERTIES_MAX_NUM 32 + +/* Connector requests have the connector index passed in the wValue field */ + +/* Get connector descriptors as an array of &gud_connector_descriptor_req */ +#define GUD_REQ_GET_CONNECTORS 0x50 + #define GUD_CONNECTORS_MAX_NUM 32 + +/* + * Get properties supported by the connector as a &gud_property_req array. + * gud_property_req.val often contains the initial value for the property. + */ +#define GUD_REQ_GET_CONNECTOR_PROPERTIES 0x51 + #define GUD_CONNECTOR_PROPERTIES_MAX_NUM 32 + +/* + * Issued when there's a TV_MODE property present. + * Gets an array of the supported TV_MODE names each entry of length + * GUD_CONNECTOR_TV_MODE_NAME_LEN. Names must be NUL-terminated. + */ +#define GUD_REQ_GET_CONNECTOR_TV_MODE_VALUES 0x52 + #define GUD_CONNECTOR_TV_MODE_NAME_LEN 16 + #define GUD_CONNECTOR_TV_MODE_MAX_NUM 16 + +/* When userspace checks connector status, this is issued first, not used for poll requests. */ +#define GUD_REQ_SET_CONNECTOR_FORCE_DETECT 0x53 + +/* + * Get connector status. Value is u8. + * + * Userspace will get a HOTPLUG uevent if one of the following is true: + * - Connection status has changed since last + * - CHANGED is set + */ +#define GUD_REQ_GET_CONNECTOR_STATUS 0x54 + #define GUD_CONNECTOR_STATUS_DISCONNECTED 0x00 + #define GUD_CONNECTOR_STATUS_CONNECTED 0x01 + #define GUD_CONNECTOR_STATUS_UNKNOWN 0x02 + #define GUD_CONNECTOR_STATUS_CONNECTED_MASK 0x03 + #define GUD_CONNECTOR_STATUS_CHANGED BIT(7) + +/* + * Display modes can be fetched as either EDID data or an array of &gud_display_mode_req. + * + * If GUD_REQ_GET_CONNECTOR_MODES returns zero, EDID is used to create display modes. + * If both display modes and EDID are returned, EDID is just passed on to userspace + * in the EDID connector property. + */ + +/* Get &gud_display_mode_req array of supported display modes */ +#define GUD_REQ_GET_CONNECTOR_MODES 0x55 + #define GUD_CONNECTOR_MAX_NUM_MODES 128 + +/* Get Extended Display Identification Data */ +#define GUD_REQ_GET_CONNECTOR_EDID 0x56 + #define GUD_CONNECTOR_MAX_EDID_LEN 2048 + +/* Set buffer properties before bulk transfer as &gud_set_buffer_req */ +#define GUD_REQ_SET_BUFFER 0x60 + +/* Check display configuration as &gud_state_req */ +#define GUD_REQ_SET_STATE_CHECK 0x61 + +/* Apply the previous STATE_CHECK configuration */ +#define GUD_REQ_SET_STATE_COMMIT 0x62 + +/* Enable/disable the display controller, value is u8: 0/1 */ +#define GUD_REQ_SET_CONTROLLER_ENABLE 0x63 + +/* Enable/disable display/output (DPMS), value is u8: 0/1 */ +#define GUD_REQ_SET_DISPLAY_ENABLE 0x64 + +#endif diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h new file mode 100644 index 000000000..8390b437a --- /dev/null +++ b/include/drm/i2c/ch7006.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_I2C_CH7006_H__ +#define __DRM_I2C_CH7006_H__ + +/** + * struct ch7006_encoder_params + * + * Describes how the ch7006 is wired up with the GPU. It should be + * used as the @params parameter of its @set_config method. + * + * See "http://www.chrontel.com/pdf/7006.pdf" for their precise + * meaning. + */ +struct ch7006_encoder_params { + enum { + CH7006_FORMAT_RGB16 = 0, + CH7006_FORMAT_YCrCb24m16, + CH7006_FORMAT_RGB24m16, + CH7006_FORMAT_RGB15, + CH7006_FORMAT_RGB24m12C, + CH7006_FORMAT_RGB24m12I, + CH7006_FORMAT_RGB24m8, + CH7006_FORMAT_RGB16m8, + CH7006_FORMAT_RGB15m8, + CH7006_FORMAT_YCrCb24m8, + } input_format; + + enum { + CH7006_CLOCK_SLAVE = 0, + CH7006_CLOCK_MASTER, + } clock_mode; + + enum { + CH7006_CLOCK_EDGE_NEG = 0, + CH7006_CLOCK_EDGE_POS, + } clock_edge; + + int xcm, pcm; + + enum { + CH7006_SYNC_SLAVE = 0, + CH7006_SYNC_MASTER, + } sync_direction; + + enum { + CH7006_SYNC_SEPARATED = 0, + CH7006_SYNC_EMBEDDED, + } sync_encoding; + + enum { + CH7006_POUT_1_8V = 0, + CH7006_POUT_3_3V, + } pout_level; + + enum { + CH7006_ACTIVE_HSYNC = 0, + CH7006_ACTIVE_DSTART, + } active_detect; +}; + +#endif diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h new file mode 100644 index 000000000..205e27384 --- /dev/null +++ b/include/drm/i2c/sil164.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_I2C_SIL164_H__ +#define __DRM_I2C_SIL164_H__ + +/** + * struct sil164_encoder_params + * + * Describes how the sil164 is connected to the GPU. It should be used + * as the @params parameter of its @set_config method. + * + * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf". 
+ */ +struct sil164_encoder_params { + enum { + SIL164_INPUT_EDGE_FALLING = 0, + SIL164_INPUT_EDGE_RISING + } input_edge; + + enum { + SIL164_INPUT_WIDTH_12BIT = 0, + SIL164_INPUT_WIDTH_24BIT + } input_width; + + enum { + SIL164_INPUT_SINGLE_EDGE = 0, + SIL164_INPUT_DUAL_EDGE + } input_dual; + + enum { + SIL164_PLL_FILTER_ON = 0, + SIL164_PLL_FILTER_OFF, + } pll_filter; + + int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */ + int duallink_skew; /** < Allowed range [-4, 3]. */ +}; + +#endif diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h new file mode 100644 index 000000000..3cb25ccbe --- /dev/null +++ b/include/drm/i2c/tda998x.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_I2C_TDA998X_H__ +#define __DRM_I2C_TDA998X_H__ + +#include <linux/hdmi.h> +#include <dt-bindings/display/tda998x.h> + +enum { + AFMT_UNUSED = 0, + AFMT_SPDIF = TDA998x_SPDIF, + AFMT_I2S = TDA998x_I2S, +}; + +struct tda998x_audio_params { + u8 config; + u8 format; + unsigned sample_width; + unsigned sample_rate; + struct hdmi_audio_infoframe cea; + u8 status[5]; +}; + +struct tda998x_encoder_params { + u8 swap_b:3; + u8 mirr_b:1; + u8 swap_a:3; + u8 mirr_a:1; + u8 swap_d:3; + u8 mirr_d:1; + u8 swap_c:3; + u8 mirr_c:1; + u8 swap_f:3; + u8 mirr_f:1; + u8 swap_e:3; + u8 mirr_e:1; + + struct tda998x_audio_params audio_params; +}; + +#endif diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h new file mode 100644 index 000000000..c1e2a43d2 --- /dev/null +++ b/include/drm/i915_component.h @@ -0,0 +1,55 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */
+
+#ifndef _I915_COMPONENT_H_
+#define _I915_COMPONENT_H_
+
+#include "drm_audio_component.h"
+
+enum i915_component_type {
+	I915_COMPONENT_AUDIO = 1,
+	I915_COMPONENT_HDCP,
+	I915_COMPONENT_PXP
+};
+
+/* MAX_PORTS is the number of ports.
+ * It must be kept in sync with I915_MAX_PORTS defined in i915_drv.h.
+ */
+#define MAX_PORTS 9
+
+/**
+ * struct i915_audio_component - Used for direct communication between i915 and hda drivers
+ */
+struct i915_audio_component {
+	/**
+	 * @base: the drm_audio_component base class
+	 */
+	struct drm_audio_component base;
+
+	/**
+	 * @aud_sample_rate: the array of audio sample rate per port
+	 */
+	int aud_sample_rate[MAX_PORTS];
+};
+
+#endif /* _I915_COMPONENT_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
new file mode 100644
index 000000000..7adce327c
--- /dev/null
+++ b/include/drm/i915_drm.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+#include <linux/types.h>
+
+/* For use by IPS driver */
+unsigned long i915_read_mch_val(void);
+bool i915_gpu_raise(void);
+bool i915_gpu_lower(void);
+bool i915_gpu_busy(void);
+bool i915_gpu_turbo_disable(void);
+
+/* Exported from arch/x86/kernel/early-quirks.c */
+extern struct resource intel_graphics_stolen_res;
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
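+ *
+ * For illustration only (a sketch, not a function defined in this header;
+ * bridge_pdev stands for the host bridge PCI device), the GTT and graphics
+ * memory size fields are decoded from the bridge's config word with the
+ * shift/mask pairs below, e.g. on SNB:
+ *
+ *	u16 ggc;
+ *	unsigned int ggms, gms;
+ *
+ *	pci_read_config_word(bridge_pdev, SNB_GMCH_CTRL, &ggc);
+ *	ggms = (ggc >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
+ *	gms  = (ggc >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;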
+ */ +#define INTEL_GMCH_CTRL 0x52 +#define INTEL_GMCH_VGA_DISABLE (1 << 1) +#define SNB_GMCH_CTRL 0x50 +#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ +#define SNB_GMCH_GGMS_MASK 0x3 +#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ +#define SNB_GMCH_GMS_MASK 0x1f +#define BDW_GMCH_GGMS_SHIFT 6 +#define BDW_GMCH_GGMS_MASK 0x3 +#define BDW_GMCH_GMS_SHIFT 8 +#define BDW_GMCH_GMS_MASK 0xff + +#define I830_GMCH_CTRL 0x52 + +#define I830_GMCH_GMS_MASK 0x70 +#define I830_GMCH_GMS_LOCAL 0x10 +#define I830_GMCH_GMS_STOLEN_512 0x20 +#define I830_GMCH_GMS_STOLEN_1024 0x30 +#define I830_GMCH_GMS_STOLEN_8192 0x40 + +#define I855_GMCH_GMS_MASK 0xF0 +#define I855_GMCH_GMS_STOLEN_0M 0x0 +#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) +#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) +#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) +#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) +#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) +#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) +#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) +#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) +#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) +#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) +#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) +#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) +#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) + +#define I830_DRB3 0x63 +#define I85X_DRB3 0x43 +#define I865_TOUD 0xc4 + +#define I830_ESMRAMC 0x91 +#define I845_ESMRAMC 0x9e +#define I85X_ESMRAMC 0x61 +#define TSEG_ENABLE (1 << 0) +#define I830_TSEG_SIZE_512K (0 << 1) +#define I830_TSEG_SIZE_1M (1 << 1) +#define I845_TSEG_SIZE_MASK (3 << 1) +#define I845_TSEG_SIZE_512K (2 << 1) +#define I845_TSEG_SIZE_1M (3 << 1) + +#define INTEL_BSM 0x5c +#define INTEL_GEN11_BSM_DW0 0xc0 +#define INTEL_GEN11_BSM_DW1 0xc4 +#define INTEL_BSM_MASK (-(1u << 20)) + +#endif /* _I915_DRM_H_ */ diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h new file mode 100644 index 000000000..f441cbcd9 --- /dev/null +++ b/include/drm/i915_mei_hdcp_interface.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: (GPL-2.0+) */ +/* + * Copyright © 2017-2019 Intel Corporation + * + * Authors: + * Ramalingam C <ramalingam.c@intel.com> + */ + +#ifndef _I915_MEI_HDCP_INTERFACE_H_ +#define _I915_MEI_HDCP_INTERFACE_H_ + +#include <linux/mutex.h> +#include <linux/device.h> +#include <drm/display/drm_hdcp.h> + +/** + * enum hdcp_port_type - HDCP port implementation type defined by ME FW + * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type + * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port + * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON + * (HDMI 2.0) solution + * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3) + * solution + */ +enum hdcp_port_type { + HDCP_PORT_TYPE_INVALID, + HDCP_PORT_TYPE_INTEGRATED, + HDCP_PORT_TYPE_LSPCON, + HDCP_PORT_TYPE_CPDP +}; + +/** + * enum hdcp_wired_protocol - HDCP adaptation used on the port + * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol + * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port + * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port + */ +enum hdcp_wired_protocol { + HDCP_PROTOCOL_INVALID, + HDCP_PROTOCOL_HDMI, + HDCP_PROTOCOL_DP +}; + +enum mei_fw_ddi { + MEI_DDI_INVALID_PORT = 0x0, + + MEI_DDI_B = 1, + MEI_DDI_C, + MEI_DDI_D, + MEI_DDI_E, + MEI_DDI_F, + MEI_DDI_A = 7, + MEI_DDI_RANGE_END = MEI_DDI_A, +}; + +/** + * enum mei_fw_tc - ME Firmware defined index for transcoders + * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder + * 
@MEI_TRANSCODER_EDP: Index for EDP Transcoder + * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder + * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder + * @MEI_TRANSCODER_A: Index for Transcoder A + * @MEI_TRANSCODER_B: Index for Transcoder B + * @MEI_TRANSCODER_C: Index for Transcoder C + * @MEI_TRANSCODER_D: Index for Transcoder D + */ +enum mei_fw_tc { + MEI_INVALID_TRANSCODER = 0x00, + MEI_TRANSCODER_EDP, + MEI_TRANSCODER_DSI0, + MEI_TRANSCODER_DSI1, + MEI_TRANSCODER_A = 0x10, + MEI_TRANSCODER_B, + MEI_TRANSCODER_C, + MEI_TRANSCODER_D +}; + +/** + * struct hdcp_port_data - intel specific HDCP port data + * @fw_ddi: ddi index as per ME FW + * @fw_tc: transcoder index as per ME FW + * @port_type: HDCP port type as per ME FW classification + * @protocol: HDCP adaptation as per ME FW + * @k: No of streams transmitted on a port. Only on DP MST this is != 1 + * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated. + * Initialized to 0 on AKE_INIT. Incremented after every successful + * transmission of RepeaterAuth_Stream_Manage message. When it rolls + * over re-Auth has to be triggered. + * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the + * streams + */ +struct hdcp_port_data { + enum mei_fw_ddi fw_ddi; + enum mei_fw_tc fw_tc; + u8 port_type; + u8 protocol; + u16 k; + u32 seq_num_m; + struct hdcp2_streamid_type *streams; +}; + +/** + * struct i915_hdcp_component_ops- ops for HDCP2.2 services. + * @owner: Module providing the ops + * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session. + * And Prepare AKE_Init. + * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate + * AKE_Send_Cert and prepare + AKE_Stored_Km/AKE_No_Stored_Km + * @verify_hprime: Verify AKE_Send_H_prime + * @store_pairing_info: Store pairing info received + * @initiate_locality_check: Prepare LC_Init + * @verify_lprime: Verify lprime + * @get_session_key: Prepare SKE_Send_Eks + * @repeater_check_flow_prepare_ack: Validate the Downstream topology + * and prepare rep_ack + * @verify_mprime: Verify mprime + * @enable_hdcp_authentication: Mark a port as authenticated. + * @close_hdcp_session: Close the Wired HDCP Tx session per port. + * This also disables the authenticated state of the port. 
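+ *
+ * For a typical HDCP 2.2 authentication these ops are called roughly in the
+ * order listed above: the AKE phase (initiate_hdcp2_session through
+ * store_pairing_info), the locality check, SKE, the repeater flow when a
+ * repeater is downstream, and finally enable_hdcp_authentication;
+ * close_hdcp_session tears the session down again.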
+ */ +struct i915_hdcp_component_ops { + /** + * @owner: mei_hdcp module + */ + struct module *owner; + + int (*initiate_hdcp2_session)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data); + int (*verify_receiver_cert_prepare_km)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert + *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz); + int (*verify_hprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime); + int (*store_pairing_info)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info + *pairing_info); + int (*initiate_locality_check)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data); + int (*verify_lprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime *rx_lprime); + int (*get_session_key)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data); + int (*repeater_check_flow_prepare_ack)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack); + int (*verify_mprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready); + int (*enable_hdcp_authentication)(struct device *dev, + struct hdcp_port_data *data); + int (*close_hdcp_session)(struct device *dev, + struct hdcp_port_data *data); +}; + +/** + * struct i915_hdcp_component_master - Used for communication between i915 + * and mei_hdcp drivers for the HDCP2.2 services + * @mei_dev: device that provide the HDCP2.2 service from MEI Bus. + * @hdcp_ops: Ops implemented by mei_hdcp driver, used by i915 driver. + */ +struct i915_hdcp_comp_master { + struct device *mei_dev; + const struct i915_hdcp_component_ops *ops; + + /* To protect the above members. */ + struct mutex mutex; +}; + +#endif /* _I915_MEI_HDCP_INTERFACE_H_ */ diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h new file mode 100644 index 000000000..8f648c32a --- /dev/null +++ b/include/drm/i915_pciids.h @@ -0,0 +1,750 @@ +/* + * Copyright 2013 Intel Corporation + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _I915_PCIIDS_H +#define _I915_PCIIDS_H + +/* + * A pci_device_id struct { + * __u32 vendor, device; + * __u32 subvendor, subdevice; + * __u32 class, class_mask; + * kernel_ulong_t driver_data; + * }; + * Don't use C99 here because "class" is reserved and we want to + * give userspace flexibility. + */ +#define INTEL_VGA_DEVICE(id, info) { \ + 0x8086, id, \ + ~0, ~0, \ + 0x030000, 0xff0000, \ + (unsigned long) info } + +#define INTEL_QUANTA_VGA_DEVICE(info) { \ + 0x8086, 0x16a, \ + 0x152d, 0x8990, \ + 0x030000, 0xff0000, \ + (unsigned long) info } + +#define INTEL_I810_IDS(info) \ + INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \ + INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \ + INTEL_VGA_DEVICE(0x7125, info) /* I810_E */ + +#define INTEL_I815_IDS(info) \ + INTEL_VGA_DEVICE(0x1132, info) /* I815*/ + +#define INTEL_I830_IDS(info) \ + INTEL_VGA_DEVICE(0x3577, info) + +#define INTEL_I845G_IDS(info) \ + INTEL_VGA_DEVICE(0x2562, info) + +#define INTEL_I85X_IDS(info) \ + INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \ + INTEL_VGA_DEVICE(0x358e, info) + +#define INTEL_I865G_IDS(info) \ + INTEL_VGA_DEVICE(0x2572, info) /* I865_G */ + +#define INTEL_I915G_IDS(info) \ + INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \ + INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */ + +#define INTEL_I915GM_IDS(info) \ + INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */ + +#define INTEL_I945G_IDS(info) \ + INTEL_VGA_DEVICE(0x2772, info) /* I945_G */ + +#define INTEL_I945GM_IDS(info) \ + INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \ + INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */ + +#define INTEL_I965G_IDS(info) \ + INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \ + INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \ + INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \ + INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */ + +#define INTEL_G33_IDS(info) \ + INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \ + INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \ + INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */ + +#define INTEL_I965GM_IDS(info) \ + INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \ + INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */ + +#define INTEL_GM45_IDS(info) \ + INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */ + +#define INTEL_G45_IDS(info) \ + INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \ + INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \ + INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \ + INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \ + INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \ + INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */ + +#define INTEL_PINEVIEW_G_IDS(info) \ + INTEL_VGA_DEVICE(0xa001, info) + +#define INTEL_PINEVIEW_M_IDS(info) \ + INTEL_VGA_DEVICE(0xa011, info) + +#define INTEL_IRONLAKE_D_IDS(info) \ + INTEL_VGA_DEVICE(0x0042, info) + +#define INTEL_IRONLAKE_M_IDS(info) \ + INTEL_VGA_DEVICE(0x0046, info) + +#define INTEL_SNB_D_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0102, info), \ + INTEL_VGA_DEVICE(0x010A, info) + +#define INTEL_SNB_D_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0112, info), \ + INTEL_VGA_DEVICE(0x0122, info) + +#define INTEL_SNB_D_IDS(info) \ + INTEL_SNB_D_GT1_IDS(info), \ + INTEL_SNB_D_GT2_IDS(info) + +#define INTEL_SNB_M_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0106, info) + +#define INTEL_SNB_M_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0116, info), \ + INTEL_VGA_DEVICE(0x0126, info) + +#define INTEL_SNB_M_IDS(info) \ + INTEL_SNB_M_GT1_IDS(info), \ + INTEL_SNB_M_GT2_IDS(info) + +#define INTEL_IVB_M_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */ + +#define INTEL_IVB_M_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0166, 
info) /* GT2 mobile */ + +#define INTEL_IVB_M_IDS(info) \ + INTEL_IVB_M_GT1_IDS(info), \ + INTEL_IVB_M_GT2_IDS(info) + +#define INTEL_IVB_D_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */ + +#define INTEL_IVB_D_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \ + INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ + +#define INTEL_IVB_D_IDS(info) \ + INTEL_IVB_D_GT1_IDS(info), \ + INTEL_IVB_D_GT2_IDS(info) + +#define INTEL_IVB_Q_IDS(info) \ + INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */ + +#define INTEL_HSW_ULT_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ + INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */ + +#define INTEL_HSW_ULX_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */ + +#define INTEL_HSW_GT1_IDS(info) \ + INTEL_HSW_ULT_GT1_IDS(info), \ + INTEL_HSW_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \ + INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \ + INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ + INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ + INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \ + INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */ + +#define INTEL_HSW_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ + INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */ \ + +#define INTEL_HSW_ULX_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \ + +#define INTEL_HSW_GT2_IDS(info) \ + INTEL_HSW_ULT_GT2_IDS(info), \ + INTEL_HSW_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \ + INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \ + INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ + INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ + INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */ + +#define INTEL_HSW_ULT_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ + INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */ + 
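+/*
+ * Illustrative use of these ID list macros (the driver-side names here are
+ * hypothetical): each INTEL_*_IDS() macro expands to a comma separated list
+ * of struct pci_device_id initializers, so a driver can build its PCI match
+ * table directly from them:
+ *
+ *	static const struct pci_device_id example_pciidlist[] = {
+ *		INTEL_HSW_ULT_GT3_IDS(&example_hsw_gt3_info),
+ *		{ }
+ *	};
+ *	MODULE_DEVICE_TABLE(pci, example_pciidlist);
+ */
+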
+#define INTEL_HSW_GT3_IDS(info) \ + INTEL_HSW_ULT_GT3_IDS(info), \ + INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \ + INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \ + INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ + INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \ + INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ + +#define INTEL_HSW_IDS(info) \ + INTEL_HSW_GT1_IDS(info), \ + INTEL_HSW_GT2_IDS(info), \ + INTEL_HSW_GT3_IDS(info) + +#define INTEL_VLV_IDS(info) \ + INTEL_VGA_DEVICE(0x0f30, info), \ + INTEL_VGA_DEVICE(0x0f31, info), \ + INTEL_VGA_DEVICE(0x0f32, info), \ + INTEL_VGA_DEVICE(0x0f33, info) + +#define INTEL_BDW_ULT_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */ + +#define INTEL_BDW_ULX_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */ + +#define INTEL_BDW_GT1_IDS(info) \ + INTEL_BDW_ULT_GT1_IDS(info), \ + INTEL_BDW_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ + INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */ + +#define INTEL_BDW_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \ + INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */ + +#define INTEL_BDW_ULX_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */ + +#define INTEL_BDW_GT2_IDS(info) \ + INTEL_BDW_ULT_GT2_IDS(info), \ + INTEL_BDW_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ + INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \ + INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */ + +#define INTEL_BDW_ULT_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x162B, info) /* Iris */ \ + +#define INTEL_BDW_ULX_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x162E, info) /* ULX */ + +#define INTEL_BDW_GT3_IDS(info) \ + INTEL_BDW_ULT_GT3_IDS(info), \ + INTEL_BDW_ULX_GT3_IDS(info), \ + INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ + INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ + +#define INTEL_BDW_ULT_RSVD_IDS(info) \ + INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x163B, info) /* Iris */ + +#define INTEL_BDW_ULX_RSVD_IDS(info) \ + INTEL_VGA_DEVICE(0x163E, info) /* ULX */ + +#define INTEL_BDW_RSVD_IDS(info) \ + INTEL_BDW_ULT_RSVD_IDS(info), \ + INTEL_BDW_ULX_RSVD_IDS(info), \ + INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ + INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ + +#define INTEL_BDW_IDS(info) \ + INTEL_BDW_GT1_IDS(info), \ + INTEL_BDW_GT2_IDS(info), \ + INTEL_BDW_GT3_IDS(info), \ + INTEL_BDW_RSVD_IDS(info) + +#define INTEL_CHV_IDS(info) \ + INTEL_VGA_DEVICE(0x22b0, info), \ + INTEL_VGA_DEVICE(0x22b1, info), \ + INTEL_VGA_DEVICE(0x22b2, info), \ + INTEL_VGA_DEVICE(0x22b3, info) + +#define INTEL_SKL_ULT_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ + 
INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */ + +#define INTEL_SKL_ULX_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */ + +#define INTEL_SKL_GT1_IDS(info) \ + INTEL_SKL_ULT_GT1_IDS(info), \ + INTEL_SKL_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */ + +#define INTEL_SKL_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */ + +#define INTEL_SKL_ULX_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */ + +#define INTEL_SKL_GT2_IDS(info) \ + INTEL_SKL_ULT_GT2_IDS(info), \ + INTEL_SKL_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ + +#define INTEL_SKL_ULT_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \ + INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */ + +#define INTEL_SKL_GT3_IDS(info) \ + INTEL_SKL_ULT_GT3_IDS(info), \ + INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \ + INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \ + INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */ + +#define INTEL_SKL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \ + INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \ + INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */ + +#define INTEL_SKL_IDS(info) \ + INTEL_SKL_GT1_IDS(info), \ + INTEL_SKL_GT2_IDS(info), \ + INTEL_SKL_GT3_IDS(info), \ + INTEL_SKL_GT4_IDS(info) + +#define INTEL_BXT_IDS(info) \ + INTEL_VGA_DEVICE(0x0A84, info), \ + INTEL_VGA_DEVICE(0x1A84, info), \ + INTEL_VGA_DEVICE(0x1A85, info), \ + INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ + INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ + +#define INTEL_GLK_IDS(info) \ + INTEL_VGA_DEVICE(0x3184, info), \ + INTEL_VGA_DEVICE(0x3185, info) + +#define INTEL_KBL_ULT_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */ + +#define INTEL_KBL_ULX_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */ + +#define INTEL_KBL_GT1_IDS(info) \ + INTEL_KBL_ULT_GT1_IDS(info), \ + INTEL_KBL_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */ + +#define INTEL_KBL_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */ + +#define INTEL_KBL_ULX_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */ + +#define INTEL_KBL_GT2_IDS(info) \ + INTEL_KBL_ULT_GT2_IDS(info), \ + INTEL_KBL_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \ + INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ + +#define INTEL_KBL_ULT_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */ + +#define INTEL_KBL_GT3_IDS(info) \ + INTEL_KBL_ULT_GT3_IDS(info), \ + INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \ + 
INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */ + +#define INTEL_KBL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */ + +/* AML/KBL Y GT2 */ +#define INTEL_AML_KBL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */ + +/* AML/CFL Y GT2 */ +#define INTEL_AML_CFL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x87CA, info) + +/* CML GT1 */ +#define INTEL_CML_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x9BA2, info), \ + INTEL_VGA_DEVICE(0x9BA4, info), \ + INTEL_VGA_DEVICE(0x9BA5, info), \ + INTEL_VGA_DEVICE(0x9BA8, info) + +#define INTEL_CML_U_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x9B21, info), \ + INTEL_VGA_DEVICE(0x9BAA, info), \ + INTEL_VGA_DEVICE(0x9BAC, info) + +/* CML GT2 */ +#define INTEL_CML_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x9BC2, info), \ + INTEL_VGA_DEVICE(0x9BC4, info), \ + INTEL_VGA_DEVICE(0x9BC5, info), \ + INTEL_VGA_DEVICE(0x9BC6, info), \ + INTEL_VGA_DEVICE(0x9BC8, info), \ + INTEL_VGA_DEVICE(0x9BE6, info), \ + INTEL_VGA_DEVICE(0x9BF6, info) + +#define INTEL_CML_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x9B41, info), \ + INTEL_VGA_DEVICE(0x9BCA, info), \ + INTEL_VGA_DEVICE(0x9BCC, info) + +#define INTEL_KBL_IDS(info) \ + INTEL_KBL_GT1_IDS(info), \ + INTEL_KBL_GT2_IDS(info), \ + INTEL_KBL_GT3_IDS(info), \ + INTEL_KBL_GT4_IDS(info), \ + INTEL_AML_KBL_GT2_IDS(info) + +/* CFL S */ +#define INTEL_CFL_S_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */ + +#define INTEL_CFL_S_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ + +/* CFL H */ +#define INTEL_CFL_H_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3E9C, info) + +#define INTEL_CFL_H_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */ + +/* CFL U GT2 */ +#define INTEL_CFL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA9, info) + +/* CFL U GT3 */ +#define INTEL_CFL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ + +/* WHL/CFL U GT1 */ +#define INTEL_WHL_U_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA1, info), \ + INTEL_VGA_DEVICE(0x3EA4, info) + +/* WHL/CFL U GT2 */ +#define INTEL_WHL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA0, info), \ + INTEL_VGA_DEVICE(0x3EA3, info) + +/* WHL/CFL U GT3 */ +#define INTEL_WHL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA2, info) + +#define INTEL_CFL_IDS(info) \ + INTEL_CFL_S_GT1_IDS(info), \ + INTEL_CFL_S_GT2_IDS(info), \ + INTEL_CFL_H_GT1_IDS(info), \ + INTEL_CFL_H_GT2_IDS(info), \ + INTEL_CFL_U_GT2_IDS(info), \ + INTEL_CFL_U_GT3_IDS(info), \ + INTEL_WHL_U_GT1_IDS(info), \ + INTEL_WHL_U_GT2_IDS(info), \ + INTEL_WHL_U_GT3_IDS(info), \ + INTEL_AML_CFL_GT2_IDS(info), \ + INTEL_CML_GT1_IDS(info), \ + INTEL_CML_GT2_IDS(info), \ + INTEL_CML_U_GT1_IDS(info), \ + INTEL_CML_U_GT2_IDS(info) + +/* CNL */ +#define INTEL_CNL_PORT_F_IDS(info) \ + INTEL_VGA_DEVICE(0x5A44, info), \ + INTEL_VGA_DEVICE(0x5A4C, info), \ + INTEL_VGA_DEVICE(0x5A54, info), \ + INTEL_VGA_DEVICE(0x5A5C, info) + +#define INTEL_CNL_IDS(info) \ + INTEL_CNL_PORT_F_IDS(info), \ + INTEL_VGA_DEVICE(0x5A40, info), \ + INTEL_VGA_DEVICE(0x5A41, info), \ + 
INTEL_VGA_DEVICE(0x5A42, info), \ + INTEL_VGA_DEVICE(0x5A49, info), \ + INTEL_VGA_DEVICE(0x5A4A, info), \ + INTEL_VGA_DEVICE(0x5A50, info), \ + INTEL_VGA_DEVICE(0x5A51, info), \ + INTEL_VGA_DEVICE(0x5A52, info), \ + INTEL_VGA_DEVICE(0x5A59, info), \ + INTEL_VGA_DEVICE(0x5A5A, info) + +/* ICL */ +#define INTEL_ICL_PORT_F_IDS(info) \ + INTEL_VGA_DEVICE(0x8A50, info), \ + INTEL_VGA_DEVICE(0x8A52, info), \ + INTEL_VGA_DEVICE(0x8A53, info), \ + INTEL_VGA_DEVICE(0x8A54, info), \ + INTEL_VGA_DEVICE(0x8A56, info), \ + INTEL_VGA_DEVICE(0x8A57, info), \ + INTEL_VGA_DEVICE(0x8A58, info), \ + INTEL_VGA_DEVICE(0x8A59, info), \ + INTEL_VGA_DEVICE(0x8A5A, info), \ + INTEL_VGA_DEVICE(0x8A5B, info), \ + INTEL_VGA_DEVICE(0x8A5C, info), \ + INTEL_VGA_DEVICE(0x8A70, info), \ + INTEL_VGA_DEVICE(0x8A71, info) + +#define INTEL_ICL_11_IDS(info) \ + INTEL_ICL_PORT_F_IDS(info), \ + INTEL_VGA_DEVICE(0x8A51, info), \ + INTEL_VGA_DEVICE(0x8A5D, info) + +/* EHL */ +#define INTEL_EHL_IDS(info) \ + INTEL_VGA_DEVICE(0x4541, info), \ + INTEL_VGA_DEVICE(0x4551, info), \ + INTEL_VGA_DEVICE(0x4555, info), \ + INTEL_VGA_DEVICE(0x4557, info), \ + INTEL_VGA_DEVICE(0x4571, info) + +/* JSL */ +#define INTEL_JSL_IDS(info) \ + INTEL_VGA_DEVICE(0x4E51, info), \ + INTEL_VGA_DEVICE(0x4E55, info), \ + INTEL_VGA_DEVICE(0x4E57, info), \ + INTEL_VGA_DEVICE(0x4E61, info), \ + INTEL_VGA_DEVICE(0x4E71, info) + +/* TGL */ +#define INTEL_TGL_12_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x9A60, info), \ + INTEL_VGA_DEVICE(0x9A68, info), \ + INTEL_VGA_DEVICE(0x9A70, info) + +#define INTEL_TGL_12_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x9A40, info), \ + INTEL_VGA_DEVICE(0x9A49, info), \ + INTEL_VGA_DEVICE(0x9A59, info), \ + INTEL_VGA_DEVICE(0x9A78, info), \ + INTEL_VGA_DEVICE(0x9AC0, info), \ + INTEL_VGA_DEVICE(0x9AC9, info), \ + INTEL_VGA_DEVICE(0x9AD9, info), \ + INTEL_VGA_DEVICE(0x9AF8, info) + +#define INTEL_TGL_12_IDS(info) \ + INTEL_TGL_12_GT1_IDS(info), \ + INTEL_TGL_12_GT2_IDS(info) + +/* RKL */ +#define INTEL_RKL_IDS(info) \ + INTEL_VGA_DEVICE(0x4C80, info), \ + INTEL_VGA_DEVICE(0x4C8A, info), \ + INTEL_VGA_DEVICE(0x4C8B, info), \ + INTEL_VGA_DEVICE(0x4C8C, info), \ + INTEL_VGA_DEVICE(0x4C90, info), \ + INTEL_VGA_DEVICE(0x4C9A, info) + +/* DG1 */ +#define INTEL_DG1_IDS(info) \ + INTEL_VGA_DEVICE(0x4905, info), \ + INTEL_VGA_DEVICE(0x4906, info), \ + INTEL_VGA_DEVICE(0x4907, info), \ + INTEL_VGA_DEVICE(0x4908, info), \ + INTEL_VGA_DEVICE(0x4909, info) + +/* ADL-S */ +#define INTEL_ADLS_IDS(info) \ + INTEL_VGA_DEVICE(0x4680, info), \ + INTEL_VGA_DEVICE(0x4682, info), \ + INTEL_VGA_DEVICE(0x4688, info), \ + INTEL_VGA_DEVICE(0x468A, info), \ + INTEL_VGA_DEVICE(0x468B, info), \ + INTEL_VGA_DEVICE(0x4690, info), \ + INTEL_VGA_DEVICE(0x4692, info), \ + INTEL_VGA_DEVICE(0x4693, info) + +/* ADL-P */ +#define INTEL_ADLP_IDS(info) \ + INTEL_VGA_DEVICE(0x46A0, info), \ + INTEL_VGA_DEVICE(0x46A1, info), \ + INTEL_VGA_DEVICE(0x46A2, info), \ + INTEL_VGA_DEVICE(0x46A3, info), \ + INTEL_VGA_DEVICE(0x46A6, info), \ + INTEL_VGA_DEVICE(0x46A8, info), \ + INTEL_VGA_DEVICE(0x46AA, info), \ + INTEL_VGA_DEVICE(0x462A, info), \ + INTEL_VGA_DEVICE(0x4626, info), \ + INTEL_VGA_DEVICE(0x4628, info), \ + INTEL_VGA_DEVICE(0x46B0, info), \ + INTEL_VGA_DEVICE(0x46B1, info), \ + INTEL_VGA_DEVICE(0x46B2, info), \ + INTEL_VGA_DEVICE(0x46B3, info), \ + INTEL_VGA_DEVICE(0x46C0, info), \ + INTEL_VGA_DEVICE(0x46C1, info), \ + INTEL_VGA_DEVICE(0x46C2, info), \ + INTEL_VGA_DEVICE(0x46C3, info) + +/* ADL-N */ +#define INTEL_ADLN_IDS(info) \ + INTEL_VGA_DEVICE(0x46D0, info), \ + 
INTEL_VGA_DEVICE(0x46D1, info), \ + INTEL_VGA_DEVICE(0x46D2, info) + +/* RPL-S */ +#define INTEL_RPLS_IDS(info) \ + INTEL_VGA_DEVICE(0xA780, info), \ + INTEL_VGA_DEVICE(0xA781, info), \ + INTEL_VGA_DEVICE(0xA782, info), \ + INTEL_VGA_DEVICE(0xA783, info), \ + INTEL_VGA_DEVICE(0xA788, info), \ + INTEL_VGA_DEVICE(0xA789, info), \ + INTEL_VGA_DEVICE(0xA78A, info), \ + INTEL_VGA_DEVICE(0xA78B, info) + +/* RPL-P */ +#define INTEL_RPLP_IDS(info) \ + INTEL_VGA_DEVICE(0xA720, info), \ + INTEL_VGA_DEVICE(0xA721, info), \ + INTEL_VGA_DEVICE(0xA7A0, info), \ + INTEL_VGA_DEVICE(0xA7A1, info), \ + INTEL_VGA_DEVICE(0xA7A8, info), \ + INTEL_VGA_DEVICE(0xA7A9, info) + +/* DG2 */ +#define INTEL_DG2_G10_IDS(info) \ + INTEL_VGA_DEVICE(0x5690, info), \ + INTEL_VGA_DEVICE(0x5691, info), \ + INTEL_VGA_DEVICE(0x5692, info), \ + INTEL_VGA_DEVICE(0x56A0, info), \ + INTEL_VGA_DEVICE(0x56A1, info), \ + INTEL_VGA_DEVICE(0x56A2, info) + +#define INTEL_DG2_G11_IDS(info) \ + INTEL_VGA_DEVICE(0x5693, info), \ + INTEL_VGA_DEVICE(0x5694, info), \ + INTEL_VGA_DEVICE(0x5695, info), \ + INTEL_VGA_DEVICE(0x56A5, info), \ + INTEL_VGA_DEVICE(0x56A6, info), \ + INTEL_VGA_DEVICE(0x56B0, info), \ + INTEL_VGA_DEVICE(0x56B1, info) + +#define INTEL_DG2_G12_IDS(info) \ + INTEL_VGA_DEVICE(0x5696, info), \ + INTEL_VGA_DEVICE(0x5697, info), \ + INTEL_VGA_DEVICE(0x56A3, info), \ + INTEL_VGA_DEVICE(0x56A4, info), \ + INTEL_VGA_DEVICE(0x56B2, info), \ + INTEL_VGA_DEVICE(0x56B3, info) + +#define INTEL_DG2_IDS(info) \ + INTEL_DG2_G10_IDS(info), \ + INTEL_DG2_G11_IDS(info), \ + INTEL_DG2_G12_IDS(info) + +#define INTEL_ATS_M150_IDS(info) \ + INTEL_VGA_DEVICE(0x56C0, info) + +#define INTEL_ATS_M75_IDS(info) \ + INTEL_VGA_DEVICE(0x56C1, info) + +#define INTEL_ATS_M_IDS(info) \ + INTEL_ATS_M150_IDS(info), \ + INTEL_ATS_M75_IDS(info) +/* MTL */ +#define INTEL_MTL_M_IDS(info) \ + INTEL_VGA_DEVICE(0x7D40, info), \ + INTEL_VGA_DEVICE(0x7D60, info) + +#define INTEL_MTL_P_IDS(info) \ + INTEL_VGA_DEVICE(0x7D45, info), \ + INTEL_VGA_DEVICE(0x7D55, info), \ + INTEL_VGA_DEVICE(0x7DD5, info) + +#define INTEL_MTL_IDS(info) \ + INTEL_MTL_M_IDS(info), \ + INTEL_MTL_P_IDS(info) + +#endif /* _I915_PCIIDS_H */ diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h new file mode 100644 index 000000000..af593ec64 --- /dev/null +++ b/include/drm/i915_pxp_tee_interface.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef _I915_PXP_TEE_INTERFACE_H_ +#define _I915_PXP_TEE_INTERFACE_H_ + +#include <linux/mutex.h> +#include <linux/device.h> + +/** + * struct i915_pxp_component_ops - ops for PXP services. + * @owner: Module providing the ops + * @send: sends data to PXP + * @receive: receives data from PXP + */ +struct i915_pxp_component_ops { + /** + * @owner: owner of the module provding the ops + */ + struct module *owner; + + int (*send)(struct device *dev, const void *message, size_t size); + int (*recv)(struct device *dev, void *buffer, size_t size); +}; + +/** + * struct i915_pxp_component - Used for communication between i915 and TEE + * drivers for the PXP services + * @tee_dev: device that provide the PXP service from TEE Bus. + * @pxp_ops: Ops implemented by TEE driver, used by i915 driver. + */ +struct i915_pxp_component { + struct device *tee_dev; + const struct i915_pxp_component_ops *ops; + + /* To protect the above members. 
*/ + struct mutex mutex; +}; + +#endif /* _I915_TEE_PXP_INTERFACE_H_ */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h new file mode 100644 index 000000000..cb0d5b720 --- /dev/null +++ b/include/drm/intel-gtt.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Common header for intel-gtt.ko and i915.ko */ + +#ifndef _DRM_INTEL_GTT_H +#define _DRM_INTEL_GTT_H + +#include <linux/types.h> + +struct agp_bridge_data; +struct pci_dev; +struct sg_table; + +void intel_gmch_gtt_get(u64 *gtt_total, + phys_addr_t *mappable_base, + resource_size_t *mappable_end); + +int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, + struct agp_bridge_data *bridge); +void intel_gmch_remove(void); + +bool intel_gmch_enable_gtt(void); + +void intel_gmch_gtt_flush(void); +void intel_gmch_gtt_insert_page(dma_addr_t addr, + unsigned int pg, + unsigned int flags); +void intel_gmch_gtt_insert_sg_entries(struct sg_table *st, + unsigned int pg_start, + unsigned int flags); +void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); + +/* Special gtt memory types */ +#define AGP_DCACHE_MEMORY 1 +#define AGP_PHYS_MEMORY 2 + +/* flag for GFDT type */ +#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) + +#endif diff --git a/include/drm/intel_lpe_audio.h b/include/drm/intel_lpe_audio.h new file mode 100644 index 000000000..b6121c8fe --- /dev/null +++ b/include/drm/intel_lpe_audio.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _INTEL_LPE_AUDIO_H_ +#define _INTEL_LPE_AUDIO_H_ + +#include <linux/types.h> +#include <linux/spinlock_types.h> + +struct platform_device; + +#define HDMI_MAX_ELD_BYTES 128 + +struct intel_hdmi_lpe_audio_port_pdata { + u8 eld[HDMI_MAX_ELD_BYTES]; + int port; + int pipe; + int ls_clock; + bool dp_output; +}; + +struct intel_hdmi_lpe_audio_pdata { + struct intel_hdmi_lpe_audio_port_pdata port[3]; /* for ports B,C,D */ + int num_ports; + int num_pipes; + + void (*notify_audio_lpe)(struct platform_device *pdev, int port); /* port: 0==B,1==C,2==D */ + spinlock_t lpe_audio_slock; +}; + +#endif /* _I915_LPE_AUDIO_H_ */ diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h new file mode 100644 index 000000000..125f096c8 --- /dev/null +++ b/include/drm/spsc_queue.h @@ -0,0 +1,122 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_ +#define DRM_SCHEDULER_SPSC_QUEUE_H_ + +#include <linux/atomic.h> +#include <linux/preempt.h> + +/** SPSC lockless queue */ + +struct spsc_node { + + /* Stores spsc_node* */ + struct spsc_node *next; +}; + +struct spsc_queue { + + struct spsc_node *head; + + /* atomic pointer to struct spsc_node* */ + atomic_long_t tail; + + atomic_t job_count; +}; + +static inline void spsc_queue_init(struct spsc_queue *queue) +{ + queue->head = NULL; + atomic_long_set(&queue->tail, (long)&queue->head); + atomic_set(&queue->job_count, 0); +} + +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) +{ + return queue->head; +} + +static inline int spsc_queue_count(struct spsc_queue *queue) +{ + return atomic_read(&queue->job_count); +} + +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) +{ + struct spsc_node **tail; + + node->next = NULL; + + preempt_disable(); + + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); + atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer + * thread when we ping the kernel thread that there is new work to do. + */ + smp_wmb(); + + preempt_enable(); + + return tail == &queue->head; +} + + +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) +{ + struct spsc_node *next, *node; + + /* Verify reading from memory and not the cache */ + smp_rmb(); + + node = READ_ONCE(queue->head); + + if (!node) + return NULL; + + next = READ_ONCE(node->next); + WRITE_ONCE(queue->head, next); + + if (unlikely(!next)) { + /* slowpath for the last element in the queue */ + + if (atomic_long_cmpxchg(&queue->tail, + (long)&node->next, (long) &queue->head) != (long)&node->next) { + /* Updating tail failed wait for new next to appear */ + do { + smp_rmb(); + } while (unlikely(!(queue->head = READ_ONCE(node->next)))); + } + } + + atomic_dec(&queue->job_count); + return node; +} + + + +#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */ diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h new file mode 100644 index 000000000..087e3f649 --- /dev/null +++ b/include/drm/task_barrier.h @@ -0,0 +1,107 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+
+/*
+ * Reusable 2 PHASE task barrier (rendezvous point) implementation for N tasks.
+ * Based on the Little Book of Semaphores - https://greenteapress.com/wp/semaphores/
+ */
+
+
+
+#ifndef DRM_TASK_BARRIER_H_
+#define DRM_TASK_BARRIER_H_
+
+/*
+ * Represents an instance of a task barrier.
+ */
+struct task_barrier {
+	unsigned int n;
+	atomic_t count;
+	struct semaphore enter_turnstile;
+	struct semaphore exit_turnstile;
+};
+
+static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
+						 unsigned int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		up(turnstile);
+}
+
+static inline void task_barrier_init(struct task_barrier *tb)
+{
+	tb->n = 0;
+	atomic_set(&tb->count, 0);
+	sema_init(&tb->enter_turnstile, 0);
+	sema_init(&tb->exit_turnstile, 0);
+}
+
+static inline void task_barrier_add_task(struct task_barrier *tb)
+{
+	tb->n++;
+}
+
+static inline void task_barrier_rem_task(struct task_barrier *tb)
+{
+	tb->n--;
+}
+
+/*
+ * Lines up all the threads BEFORE the critical point.
+ *
+ * When all threads have passed this code, the entry barrier is locked again.
+ */
+static inline void task_barrier_enter(struct task_barrier *tb)
+{
+	if (atomic_inc_return(&tb->count) == tb->n)
+		task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);
+
+	down(&tb->enter_turnstile);
+}
+
+/*
+ * Lines up all the threads AFTER the critical point.
+ *
+ * This function is used to avoid any one thread running ahead if the barrier is
+ * used repeatedly.
+ */
+static inline void task_barrier_exit(struct task_barrier *tb)
+{
+	if (atomic_dec_return(&tb->count) == 0)
+		task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);
+
+	down(&tb->exit_turnstile);
+}
+
+/* Convenience function for when nothing needs to be done between entry and exit */
+static inline void task_barrier_full(struct task_barrier *tb)
+{
+	task_barrier_enter(tb);
+	task_barrier_exit(tb);
+}
+
+#endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
new file mode 100644
index 000000000..44a538ee5
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -0,0 +1,471 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#ifndef _TTM_BO_API_H_ +#define _TTM_BO_API_H_ + +#include <drm/drm_gem.h> +#include <drm/drm_vma_manager.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/mutex.h> +#include <linux/mm.h> +#include <linux/bitmap.h> +#include <linux/dma-resv.h> + +#include "ttm_resource.h" + +struct ttm_global; + +struct ttm_device; + +struct iosys_map; + +struct drm_mm_node; + +struct ttm_placement; + +struct ttm_place; + +/** + * enum ttm_bo_type + * + * @ttm_bo_type_device: These are 'normal' buffers that can + * be mmapped by user space. Each of these bos occupy a slot in the + * device address space, that can be used for normal vm operations. + * + * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, + * but they cannot be accessed from user-space. For kernel-only use. + * + * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another + * driver. + */ + +enum ttm_bo_type { + ttm_bo_type_device, + ttm_bo_type_kernel, + ttm_bo_type_sg +}; + +struct ttm_tt; + +/** + * struct ttm_buffer_object + * + * @base: drm_gem_object superclass data. + * @bdev: Pointer to the buffer object device structure. + * @type: The bo type. + * @page_alignment: Page alignment. + * @destroy: Destruction function. If NULL, kfree is used. + * @num_pages: Actual number of pages. + * @kref: Reference count of this buffer object. When this refcount reaches + * zero, the object is destroyed or put on the delayed delete list. + * @mem: structure describing current placement. + * @ttm: TTM structure holding system pages. + * @evicted: Whether the object was evicted without user-space knowing. + * @deleted: True if the object is only a zombie and already deleted. + * @ddestroy: List head for the delayed destroy list. + * @swap: List head for swap LRU list. + * @offset: The current GPU offset, which can have different meanings + * depending on the memory type. For SYSTEM type memory, it should be 0. + * @cur_placement: Hint of current placement. + * + * Base class for TTM buffer object, that deals with data placement and CPU + * mappings. 
GPU mappings are really up to the driver, but for simpler GPUs + * the driver can usually use the placement offset @offset directly as the + * GPU virtual address. For drivers implementing multiple + * GPU memory manager contexts, the driver should manage the address space + * in these contexts separately and use these objects to get the correct + * placement and caching for these GPU maps. This makes it possible to use + * these objects for even quite elaborate memory management schemes. + * The destroy member, the API visibility of this object makes it possible + * to derive driver specific types. + */ + +struct ttm_buffer_object { + struct drm_gem_object base; + + /** + * Members constant at init. + */ + + struct ttm_device *bdev; + enum ttm_bo_type type; + uint32_t page_alignment; + void (*destroy) (struct ttm_buffer_object *); + + /** + * Members not needing protection. + */ + struct kref kref; + + /** + * Members protected by the bo::resv::reserved lock. + */ + + struct ttm_resource *resource; + struct ttm_tt *ttm; + bool deleted; + struct ttm_lru_bulk_move *bulk_move; + + /** + * Members protected by the bdev::lru_lock. + */ + + struct list_head ddestroy; + + /** + * Members protected by a bo reservation. + */ + + unsigned priority; + unsigned pin_count; + + /** + * Special members that are protected by the reserve lock + * and the bo::lock when written to. Can be read with + * either of these locks held. + */ + + struct sg_table *sg; +}; + +/** + * struct ttm_bo_kmap_obj + * + * @virtual: The current kernel virtual address. + * @page: The page when kmap'ing a single page. + * @bo_kmap_type: Type of bo_kmap. + * + * Object describing a kernel mapping. Since a TTM bo may be located + * in various memory types with various caching policies, the + * mapping can either be an ioremap, a vmap, a kmap or part of a + * premapped region. + */ + +#define TTM_BO_MAP_IOMEM_MASK 0x80 +struct ttm_bo_kmap_obj { + void *virtual; + struct page *page; + enum { + ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK, + ttm_bo_map_vmap = 2, + ttm_bo_map_kmap = 3, + ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, + } bo_kmap_type; + struct ttm_buffer_object *bo; +}; + +/** + * struct ttm_operation_ctx + * + * @interruptible: Sleep interruptible if sleeping. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages. + * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple + * BOs share the same reservation object. + * @force_alloc: Don't check the memory account during suspend or CPU page + * faults. Should only be used by TTM internally. + * @resv: Reservation object to allow reserved evictions with. + * + * Context for TTM operations like changing buffer placement or general memory + * allocation. + */ +struct ttm_operation_ctx { + bool interruptible; + bool no_wait_gpu; + bool gfp_retry_mayfail; + bool allow_res_evict; + bool force_alloc; + struct dma_resv *resv; + uint64_t bytes_moved; +}; + +/** + * ttm_bo_get - reference a struct ttm_buffer_object + * + * @bo: The buffer object. + */ +static inline void ttm_bo_get(struct ttm_buffer_object *bo) +{ + kref_get(&bo->kref); +} + +/** + * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless + * its refcount has already reached zero. + * @bo: The buffer object. + * + * Used to reference a TTM buffer object in lookups where the object is removed + * from the lookup structure during the destructor and for RCU lookups. 
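+ *
+ * A lookup-side sketch (illustrative only; the lookup lock and
+ * my_dev_lookup_bo() are made-up driver code):
+ *
+ *	spin_lock(&my_dev->lookup_lock);
+ *	bo = my_dev_lookup_bo(my_dev, handle);
+ *	if (bo)
+ *		bo = ttm_bo_get_unless_zero(bo);
+ *	spin_unlock(&my_dev->lookup_lock);
+ *
+ *	if (bo) {
+ *		... use the buffer object ...
+ *		ttm_bo_put(bo);
+ *	}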
+ * + * Returns: @bo if the referencing was successful, NULL otherwise. + */ +static inline __must_check struct ttm_buffer_object * +ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) +{ + if (!kref_get_unless_zero(&bo->kref)) + return NULL; + return bo; +} + +/** + * ttm_bo_wait - wait for buffer idle. + * + * @bo: The buffer object. + * @interruptible: Use interruptible wait. + * @no_wait: Return immediately if buffer is busy. + * + * This function must be called with the bo::mutex held, and makes + * sure any previous rendering to the buffer is completed. + * Note: It might be necessary to block validations before the + * wait by reserving the buffer. + * Returns -EBUSY if no_wait is true and the buffer is busy. + * Returns -ERESTARTSYS if interrupted by a signal. + */ +int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); + +static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) +{ + return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); +} + +/** + * ttm_bo_validate + * + * @bo: The buffer object. + * @placement: Proposed placement for the buffer object. + * @ctx: validation parameters. + * + * Changes placement and caching policy of the buffer object + * according proposed placement. + * Returns + * -EINVAL on invalid proposed placement. + * -ENOMEM on out-of-memory condition. + * -EBUSY if no_wait is true and buffer busy. + * -ERESTARTSYS if interrupted by a signal. + */ +int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_operation_ctx *ctx); + +/** + * ttm_bo_put + * + * @bo: The buffer object. + * + * Unreference a buffer object. + */ +void ttm_bo_put(struct ttm_buffer_object *bo); + +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); +void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, + struct ttm_lru_bulk_move *bulk); + +/** + * ttm_bo_lock_delayed_workqueue + * + * Prevent the delayed workqueue from running. + * Returns + * True if the workqueue was queued at the time + */ +int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev); + +/** + * ttm_bo_unlock_delayed_workqueue + * + * Allows the delayed workqueue to run. + */ +void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched); + +/** + * ttm_bo_eviction_valuable + * + * @bo: The buffer object to evict + * @place: the placement we need to make room for + * + * Check if it is valuable to evict the BO to make room for the given placement. + */ +bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, + const struct ttm_place *place); + +int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, + enum ttm_bo_type type, struct ttm_placement *placement, + uint32_t alignment, struct ttm_operation_ctx *ctx, + struct sg_table *sg, struct dma_resv *resv, + void (*destroy) (struct ttm_buffer_object *)); +int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo, + enum ttm_bo_type type, struct ttm_placement *placement, + uint32_t alignment, bool interruptible, + struct sg_table *sg, struct dma_resv *resv, + void (*destroy) (struct ttm_buffer_object *)); + +/** + * ttm_kmap_obj_virtual + * + * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap. + * @is_iomem: Pointer to an integer that on return indicates 1 if the + * virtual map is io memory, 0 if normal memory. + * + * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap. 
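+ *
+ * A typical map/access/unmap sequence looks roughly as follows (illustrative
+ * sketch; error handling is trimmed and @bo comes from the caller):
+ *
+ *	struct ttm_bo_kmap_obj map;
+ *	bool is_iomem;
+ *	void *virtual;
+ *	int ret;
+ *
+ *	ret = ttm_bo_kmap(bo, 0, 1, &map);
+ *	if (ret)
+ *		return ret;
+ *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+ *	... access the page through virtual, honouring is_iomem ...
+ *	ttm_bo_kunmap(&map);
+ *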
+ * If *is_iomem is 1 on return, the virtual address points to an io memory area, + * that should strictly be accessed by the iowriteXX() and similar functions. + */ +static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, + bool *is_iomem) +{ + *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK); + return map->virtual; +} + +/** + * ttm_bo_kmap + * + * @bo: The buffer object. + * @start_page: The first page to map. + * @num_pages: Number of pages to map. + * @map: pointer to a struct ttm_bo_kmap_obj representing the map. + * + * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the + * data in the buffer object. The ttm_kmap_obj_virtual function can then be + * used to obtain a virtual address to the data. + * + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid range. + */ +int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct ttm_bo_kmap_obj *map); + +/** + * ttm_bo_kunmap + * + * @map: Object describing the map to unmap. + * + * Unmaps a kernel map set up by ttm_bo_kmap. + */ +void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); + +/** + * ttm_bo_vmap + * + * @bo: The buffer object. + * @map: pointer to a struct iosys_map representing the map. + * + * Sets up a kernel virtual mapping, using ioremap or vmap to the + * data in the buffer object. The parameter @map returns the virtual + * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap(). + * + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid range. + */ +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map); + +/** + * ttm_bo_vunmap + * + * @bo: The buffer object. + * @map: Object describing the map to unmap. + * + * Unmaps a kernel map set up by ttm_bo_vmap(). + */ +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map); + +/** + * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object. + * + * @vma: vma as input from the fbdev mmap method. + * @bo: The bo backing the address space. + * + * Maps a buffer object. + */ +int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); + +/** + * ttm_bo_io + * + * @bdev: Pointer to the struct ttm_device. + * @filp: Pointer to the struct file attempting to read / write. + * @wbuf: User-space pointer to address of buffer to write. NULL on read. + * @rbuf: User-space pointer to address of buffer to read into. + * Null on write. + * @count: Number of bytes to read / write. + * @f_pos: Pointer to current file position. + * @write: 1 for read, 0 for write. + * + * This function implements read / write into ttm buffer objects, and is + * intended to + * be called from the fops::read and fops::write method. + * Returns: + * See man (2) write, man(2) read. In particular, + * the function may return -ERESTARTSYS if + * interrupted by a signal. 
+ */ +ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp, + const char __user *wbuf, char __user *rbuf, + size_t count, loff_t *f_pos, bool write); + +int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, + gfp_t gfp_flags); + +void ttm_bo_pin(struct ttm_buffer_object *bo); +void ttm_bo_unpin(struct ttm_buffer_object *bo); + +int ttm_mem_evict_first(struct ttm_device *bdev, + struct ttm_resource_manager *man, + const struct ttm_place *place, + struct ttm_operation_ctx *ctx, + struct ww_acquire_ctx *ticket); + +/* Default number of pre-faulted pages in the TTM fault handler */ +#define TTM_BO_VM_NUM_PREFAULT 16 + +vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, + struct vm_fault *vmf); + +vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, + pgprot_t prot, + pgoff_t num_prefault); + +vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf); + +void ttm_bo_vm_open(struct vm_area_struct *vma); + +void ttm_bo_vm_close(struct vm_area_struct *vma); + +int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); +bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all); + +vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot); + +#endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h new file mode 100644 index 000000000..1afa891f4 --- /dev/null +++ b/include/drm/ttm/ttm_bo_driver.h @@ -0,0 +1,303 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ +#ifndef _TTM_BO_DRIVER_H_ +#define _TTM_BO_DRIVER_H_ + +#include <drm/drm_mm.h> +#include <drm/drm_vma_manager.h> +#include <linux/workqueue.h> +#include <linux/fs.h> +#include <linux/spinlock.h> +#include <linux/dma-resv.h> + +#include <drm/ttm/ttm_device.h> + +#include "ttm_bo_api.h" +#include "ttm_kmap_iter.h" +#include "ttm_placement.h" +#include "ttm_tt.h" +#include "ttm_pool.h" + +/* + * ttm_bo.c + */ + +/** + * ttm_bo_mem_space + * + * @bo: Pointer to a struct ttm_buffer_object. the data of which + * we want to allocate space for. + * @proposed_placement: Proposed new placement for the buffer object. 
+ * @mem: A struct ttm_resource.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @mem, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+		     struct ttm_placement *placement,
+		     struct ttm_resource **mem,
+		     struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @ticket: ticket used to acquire the ww_mutex.
+ *
+ * Locks a buffer object for validation (or prevents other processes from
+ * locking it for validation), while taking a number of measures to prevent
+ * deadlocks.
+ *
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again.
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true.
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @ticket is non-NULL.
+ */
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+				 bool interruptible, bool no_wait,
+				 struct ww_acquire_ctx *ticket)
+{
+	int ret;
+
+	if (no_wait) {
+		bool success;
+		if (WARN_ON(ticket))
+			return -EBUSY;
+
+		success = dma_resv_trylock(bo->base.resv);
+		return success ? 0 : -EBUSY;
+	}
+
+	if (interruptible)
+		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
+	else
+		ret = dma_resv_lock(bo->base.resv, ticket);
+	if (ret == -EINTR)
+		return -ERESTARTSYS;
+	return ret;
+}
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @ticket: ticket used to acquire the ww_mutex.
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+					  bool interruptible,
+					  struct ww_acquire_ctx *ticket)
+{
+	if (interruptible) {
+		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+							   ticket);
+		if (ret == -EINTR)
+			ret = -ERESTARTSYS;
+		return ret;
+	}
+	dma_resv_lock_slow(bo->base.resv, ticket);
+	return 0;
+}
+
+static inline void
+ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+{
+	spin_lock(&bo->bdev->lru_lock);
+	ttm_bo_move_to_lru_tail(bo);
+	spin_unlock(&bo->bdev->lru_lock);
+}
+
+static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
+				     struct ttm_resource *new_mem)
+{
+	WARN_ON(bo->resource);
+	bo->resource = new_mem;
+}
+
+/**
+ * ttm_bo_move_null - assign memory for a buffer object.
+ * @bo: The bo to assign the memory to + * @new_mem: The memory to be assigned. + * + * Assign the memory from new_mem to the memory of the buffer object bo. + */ +static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem) +{ + ttm_resource_free(bo, &bo->resource); + ttm_bo_assign_mem(bo, new_mem); +} + +/** + * ttm_bo_unreserve + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Unreserve a previous reservation of @bo. + */ +static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) +{ + ttm_bo_move_to_lru_tail_unlocked(bo); + dma_resv_unlock(bo->base.resv); +} + +/* + * ttm_bo_util.c + */ +int ttm_mem_io_reserve(struct ttm_device *bdev, + struct ttm_resource *mem); +void ttm_mem_io_free(struct ttm_device *bdev, + struct ttm_resource *mem); + +/** + * ttm_bo_move_memcpy + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @new_mem: struct ttm_resource indicating where to move. + * + * Fallback move function for a mappable buffer object in mappable memory. + * The function will, if successful, + * free any old aperture space, and set (@new_mem)->mm_node to NULL, + * and update the (@bo)->mem placement flags. If unsuccessful, the old + * data remains untouched, and it's up to the caller to free the + * memory space indicated by @new_mem. + * Returns: + * !0: Failure. + */ + +int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem); + +/** + * ttm_bo_move_accel_cleanup. + * + * @bo: A pointer to a struct ttm_buffer_object. + * @fence: A fence object that signals when moving is complete. + * @evict: This is an evict move. Don't return until the buffer is idle. + * @pipeline: evictions are to be pipelined. + * @new_mem: struct ttm_resource indicating where to move. + * + * Accelerated move function to be called when an accelerated move + * has been scheduled. The function will create a new temporary buffer object + * representing the old placement, and put the sync object on both buffer + * objects. After that the newly created buffer object is unref'd to be + * destroyed when the move is complete. This will help pipeline + * buffer moves. + */ +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, bool evict, + bool pipeline, + struct ttm_resource *new_mem); + +/** + * ttm_bo_move_sync_cleanup. + * + * @bo: A pointer to a struct ttm_buffer_object. + * @new_mem: struct ttm_resource indicating where to move. + * + * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed + * by the caller to be idle. Typically used after memcpy buffer moves. + */ +void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem); + +/** + * ttm_bo_pipeline_gutting. + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Pipelined gutting a BO of its backing store. + */ +int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); + +/** + * ttm_io_prot + * + * bo: ttm buffer object + * res: ttm resource object + * @tmp: Page protection flag for a normal, cached mapping. + * + * Utility function that returns the pgprot_t that should be used for + * setting up a PTE with the caching model indicated by @c_state. + */ +pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, + pgprot_t tmp); + +/** + * ttm_bo_tt_bind + * + * Bind the object tt to a memory resource. 
+ */ +int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem); + +/** + * ttm_bo_tt_destroy. + */ +void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); + +void ttm_move_memcpy(bool clear, + u32 num_pages, + struct ttm_kmap_iter *dst_iter, + struct ttm_kmap_iter *src_iter); + +struct ttm_kmap_iter * +ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, + struct io_mapping *iomap, + struct sg_table *st, + resource_size_t start); +#endif diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h new file mode 100644 index 000000000..235a743d9 --- /dev/null +++ b/include/drm/ttm/ttm_caching.h @@ -0,0 +1,55 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#ifndef _TTM_CACHING_H_ +#define _TTM_CACHING_H_ + +#define TTM_NUM_CACHING_TYPES 3 + +/** + * enum ttm_caching - CPU caching and BUS snooping behavior. + */ +enum ttm_caching { + /** + * @ttm_uncached: Most defensive option for device mappings, + * don't even allow write combining. + */ + ttm_uncached, + + /** + * @ttm_write_combined: Don't cache read accesses, but allow at least + * writes to be combined. + */ + ttm_write_combined, + + /** + * @ttm_cached: Fully cached like normal system memory, requires that + * devices snoop the CPU cache on accesses. + */ + ttm_cached +}; + +pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp); + +#endif diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h new file mode 100644 index 000000000..95b3c04b1 --- /dev/null +++ b/include/drm/ttm/ttm_device.h @@ -0,0 +1,302 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#ifndef _TTM_DEVICE_H_ +#define _TTM_DEVICE_H_ + +#include <linux/types.h> +#include <linux/workqueue.h> +#include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_pool.h> + +struct ttm_device; +struct ttm_placement; +struct ttm_buffer_object; +struct ttm_operation_ctx; + +/** + * struct ttm_global - Buffer object driver global data. + */ +extern struct ttm_global { + + /** + * @dummy_read_page: Pointer to a dummy page used for mapping requests + * of unpopulated pages. Constant after init. + */ + struct page *dummy_read_page; + + /** + * @device_list: List of buffer object devices. Protected by + * ttm_global_mutex. + */ + struct list_head device_list; + + /** + * @bo_count: Number of buffer objects allocated by devices. + */ + atomic_t bo_count; +} ttm_glob; + +struct ttm_device_funcs { + /** + * ttm_tt_create + * + * @bo: The buffer object to create the ttm for. + * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags. + * + * Create a struct ttm_tt to back data with system memory pages. + * No pages are actually allocated. + * Returns: + * NULL: Out of memory. + */ + struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo, + uint32_t page_flags); + + /** + * ttm_tt_populate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Allocate all backing pages + * Returns: + * -ENOMEM: Out of memory. + */ + int (*ttm_tt_populate)(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); + + /** + * ttm_tt_unpopulate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Free all backing page + */ + void (*ttm_tt_unpopulate)(struct ttm_device *bdev, + struct ttm_tt *ttm); + + /** + * ttm_tt_destroy + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * + * Destroy the backend. This will be call back from ttm_tt_destroy so + * don't call ttm_tt_destroy from the callback or infinite loop. + */ + void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm); + + /** + * struct ttm_bo_driver member eviction_valuable + * + * @bo: the buffer object to be evicted + * @place: placement we need room for + * + * Check with the driver if it is valuable to evict a BO to make room + * for a certain placement. + */ + bool (*eviction_valuable)(struct ttm_buffer_object *bo, + const struct ttm_place *place); + /** + * struct ttm_bo_driver member evict_flags: + * + * @bo: the buffer object to be evicted + * + * Return the bo flags for a buffer which is not mapped to the hardware. + * These will be placed in proposed_flags so that when the move is + * finished, they'll end up in bo->mem.flags + * This should not cause multihop evictions, and the core will warn + * if one is proposed. 
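+ *
+ * A minimal implementation can simply force a driver-defined system-memory
+ * placement (illustrative sketch; my_sys_placement is a made-up driver
+ * global):
+ *
+ *	static void my_evict_flags(struct ttm_buffer_object *bo,
+ *				   struct ttm_placement *placement)
+ *	{
+ *		*placement = my_sys_placement;
+ *	}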
+ */ + + void (*evict_flags)(struct ttm_buffer_object *bo, + struct ttm_placement *placement); + + /** + * struct ttm_bo_driver member move: + * + * @bo: the buffer to move + * @evict: whether this motion is evicting the buffer from + * the graphics address space + * @ctx: context for this move with parameters + * @new_mem: the new memory region receiving the buffer + @ @hop: placement for driver directed intermediate hop + * + * Move a buffer between two memory regions. + * Returns errno -EMULTIHOP if driver requests a hop + */ + int (*move)(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem, + struct ttm_place *hop); + + /** + * Hook to notify driver about a resource delete. + */ + void (*delete_mem_notify)(struct ttm_buffer_object *bo); + + /** + * notify the driver that we're about to swap out this bo + */ + void (*swap_notify)(struct ttm_buffer_object *bo); + + /** + * Driver callback on when mapping io memory (for bo_move_memcpy + * for instance). TTM will take care to call io_mem_free whenever + * the mapping is not use anymore. io_mem_reserve & io_mem_free + * are balanced. + */ + int (*io_mem_reserve)(struct ttm_device *bdev, + struct ttm_resource *mem); + void (*io_mem_free)(struct ttm_device *bdev, + struct ttm_resource *mem); + + /** + * Return the pfn for a given page_offset inside the BO. + * + * @bo: the BO to look up the pfn for + * @page_offset: the offset to look up + */ + unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, + unsigned long page_offset); + + /** + * Read/write memory buffers for ptrace access + * + * @bo: the BO to access + * @offset: the offset from the start of the BO + * @buf: pointer to source/destination buffer + * @len: number of bytes to copy + * @write: whether to read (0) from or write (non-0) to BO + * + * If successful, this function should return the number of + * bytes copied, -EIO otherwise. If the number of bytes + * returned is < len, the function may be called again with + * the remainder of the buffer to copy. + */ + int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); + + /** + * Notify the driver that we're about to release a BO + * + * @bo: BO that is about to be released + * + * Gives the driver a chance to do any cleanup, including + * adding fences that may force a delayed delete + */ + void (*release_notify)(struct ttm_buffer_object *bo); +}; + +/** + * struct ttm_device - Buffer object driver device-specific data. + */ +struct ttm_device { + /** + * @device_list: Our entry in the global device list. + * Constant after bo device init + */ + struct list_head device_list; + + /** + * @funcs: Function table for the device. + * Constant after bo device init + */ + struct ttm_device_funcs *funcs; + + /** + * @sysman: Resource manager for the system domain. + * Access via ttm_manager_type. + */ + struct ttm_resource_manager sysman; + + /** + * @man_drv: An array of resource_managers, one per resource type. + */ + struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; + + /** + * @vma_manager: Address space manager for finding BOs to mmap. + */ + struct drm_vma_offset_manager *vma_manager; + + /** + * @pool: page pool for the device. + */ + struct ttm_pool pool; + + /** + * @lru_lock: Protection for the per manager LRU and ddestroy lists. + */ + spinlock_t lru_lock; + + /** + * @ddestroy: Destroyed but not yet cleaned up buffer objects. 
+ */ + struct list_head ddestroy; + + /** + * @pinned: Buffer objects which are pinned and so not on any LRU list. + */ + struct list_head pinned; + + /** + * @dev_mapping: A pointer to the struct address_space for invalidating + * CPU mappings on buffer move. Protected by load/unload sync. + */ + struct address_space *dev_mapping; + + /** + * @wq: Work queue structure for the delayed delete workqueue. + */ + struct delayed_work wq; +}; + +int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags); +int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, + gfp_t gfp_flags); + +static inline struct ttm_resource_manager * +ttm_manager_type(struct ttm_device *bdev, int mem_type) +{ + BUILD_BUG_ON(__builtin_constant_p(mem_type) + && mem_type >= TTM_NUM_MEM_TYPES); + return bdev->man_drv[mem_type]; +} + +static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type, + struct ttm_resource_manager *manager) +{ + BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES); + bdev->man_drv[type] = manager; +} + +int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs, + struct device *dev, struct address_space *mapping, + struct drm_vma_offset_manager *vma_manager, + bool use_dma_alloc, bool use_dma32); +void ttm_device_fini(struct ttm_device *bdev); +void ttm_device_clear_dma_mappings(struct ttm_device *bdev); + +#endif diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h new file mode 100644 index 000000000..a99d7fdf2 --- /dev/null +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -0,0 +1,118 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#ifndef _TTM_EXECBUF_UTIL_H_ +#define _TTM_EXECBUF_UTIL_H_ + +#include <linux/list.h> + +#include "ttm_bo_api.h" + +/** + * struct ttm_validate_buffer + * + * @head: list head for thread-private list. + * @bo: refcounted buffer object pointer. + * @num_shared: How many shared fences we want to add. 
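+ *
+ * A simplified command-submission flow using these helpers (illustrative
+ * sketch; filling @list and obtaining @fence are driver-specific):
+ *
+ *	struct ww_acquire_ctx ticket;
+ *	LIST_HEAD(list);
+ *	int ret;
+ *
+ *	... add one struct ttm_validate_buffer per BO to &list ...
+ *
+ *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ *	if (ret)
+ *		return ret;
+ *	... validate the buffers and submit the command stream ...
+ *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
+ *
+ * If submission fails after a successful reserve, undo the reservations with
+ * ttm_eu_backoff_reservation() instead.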
+ */ + +struct ttm_validate_buffer { + struct list_head head; + struct ttm_buffer_object *bo; + unsigned int num_shared; +}; + +/** + * function ttm_eu_backoff_reservation + * + * @ticket: ww_acquire_ctx from reserve call + * @list: thread private list of ttm_validate_buffer structs. + * + * Undoes all buffer validation reservations for bos pointed to by + * the list entries. + */ +void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, + struct list_head *list); + +/** + * function ttm_eu_reserve_buffers + * + * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only + * non-blocking reserves should be tried. + * @list: thread private list of ttm_validate_buffer structs. + * @intr: should the wait be interruptible + * @dups: [out] optional list of duplicates. + * @del_lru: true if BOs should be removed from the LRU. + * + * Tries to reserve bos pointed to by the list entries for validation. + * If the function returns 0, all buffers are marked as "unfenced", + * taken off the lru lists and are not synced for write CPU usage. + * + * If the function detects a deadlock due to multiple threads trying to + * reserve the same buffers in reverse order, all threads except one will + * back off and retry. This function may sleep while waiting for + * CPU write reservations to be cleared, and for other threads to + * unreserve their buffers. + * + * If intr is set to true, this function may return -ERESTARTSYS if the + * calling process receives a signal while waiting. In that case, no + * buffers on the list will be reserved upon return. + * + * If dups is non NULL all buffers already reserved by the current thread + * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned + * on the first already reserved buffer and all buffers from the list are + * unreserved again. + * + * Buffers reserved by this function should be unreserved by + * a call to either ttm_eu_backoff_reservation() or + * ttm_eu_fence_buffer_objects() when command submission is complete or + * has failed. + */ +int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, + struct list_head *list, bool intr, + struct list_head *dups); + +/** + * function ttm_eu_fence_buffer_objects. + * + * @ticket: ww_acquire_ctx from reserve call + * @list: thread private list of ttm_validate_buffer structs. + * @fence: The new exclusive fence for the buffers. + * + * This function should be called when command submission is complete, and + * it will add a new sync object to bos pointed to by entries on @list. + * It also unreserves all buffers, putting them on lru lists. + * + */ +void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, + struct list_head *list, + struct dma_fence *fence); + +#endif diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h new file mode 100644 index 000000000..cc5c09a21 --- /dev/null +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef __TTM_KMAP_ITER_H__ +#define __TTM_KMAP_ITER_H__ + +#include <linux/types.h> + +struct ttm_kmap_iter; +struct iosys_map; + +/** + * struct ttm_kmap_iter_ops - Ops structure for a struct + * ttm_kmap_iter. + * @maps_tt: Whether the iterator maps TT memory directly, as opposed + * mapping a TT through an aperture. Both these modes have + * struct ttm_resource_manager::use_tt set, but the latter typically + * returns is_iomem == true from ttm_mem_io_reserve. 
+ */ +struct ttm_kmap_iter_ops { + /** + * kmap_local() - Map a PAGE_SIZE part of the resource using + * kmap_local semantics. + * @res_iter: Pointer to the struct ttm_kmap_iter representing + * the resource. + * @dmap: The struct iosys_map holding the virtual address after + * the operation. + * @i: The location within the resource to map. PAGE_SIZE granularity. + */ + void (*map_local)(struct ttm_kmap_iter *res_iter, + struct iosys_map *dmap, pgoff_t i); + /** + * unmap_local() - Unmap a PAGE_SIZE part of the resource previously + * mapped using kmap_local. + * @res_iter: Pointer to the struct ttm_kmap_iter representing + * the resource. + * @dmap: The struct iosys_map holding the virtual address after + * the operation. + */ + void (*unmap_local)(struct ttm_kmap_iter *res_iter, + struct iosys_map *dmap); + bool maps_tt; +}; + +/** + * struct ttm_kmap_iter - Iterator for kmap_local type operations on a + * resource. + * @ops: Pointer to the operations struct. + * + * This struct is intended to be embedded in a resource-specific specialization + * implementing operations for the resource. + * + * Nothing stops us from extending the operations to vmap, vmap_pfn etc, + * replacing some or parts of the ttm_bo_util. cpu-map functionality. + */ +struct ttm_kmap_iter { + const struct ttm_kmap_iter_ops *ops; +}; + +#endif /* __TTM_KMAP_ITER_H__ */ diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h new file mode 100644 index 000000000..8074d0f6c --- /dev/null +++ b/include/drm/ttm/ttm_placement.h @@ -0,0 +1,101 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#ifndef _TTM_PLACEMENT_H_ +#define _TTM_PLACEMENT_H_ + +#include <linux/types.h> + +/* + * Memory regions for data placement. + * + * Buffers placed in TTM_PL_SYSTEM are considered under TTMs control and can + * be swapped out whenever TTMs thinks it is a good idea. + * In cases where drivers would like to use TTM_PL_SYSTEM as a valid + * placement they need to be able to handle the issues that arise due to the + * above manually. 
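+ *
+ * As a rough illustration (driver-side sketch; the names are made up), a BO
+ * that prefers VRAM but may fall back to TT could be described with:
+ *
+ *	static const struct ttm_place my_places[] = {
+ *		{ .mem_type = TTM_PL_VRAM, .flags = TTM_PL_FLAG_TOPDOWN },
+ *		{ .mem_type = TTM_PL_TT },
+ *	};
+ *
+ *	static const struct ttm_placement my_placement = {
+ *		.num_placement = ARRAY_SIZE(my_places),
+ *		.placement = my_places,
+ *		.num_busy_placement = ARRAY_SIZE(my_places),
+ *		.busy_placement = my_places,
+ *	};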
+ * + * For BO's which reside in system memory but for which the accelerator + * requires direct access (i.e. their usage needs to be synchronized + * between the CPU and accelerator via fences) a new, driver private + * placement that can handle such scenarios is a good idea. + */ + +#define TTM_PL_SYSTEM 0 +#define TTM_PL_TT 1 +#define TTM_PL_VRAM 2 +#define TTM_PL_PRIV 3 + +/* + * TTM_PL_FLAG_TOPDOWN requests to be placed from the + * top of the memory area, instead of the bottom. + */ + +#define TTM_PL_FLAG_CONTIGUOUS (1 << 0) +#define TTM_PL_FLAG_TOPDOWN (1 << 1) + +/* For multihop handling */ +#define TTM_PL_FLAG_TEMPORARY (1 << 2) + +/** + * struct ttm_place + * + * @fpfn: first valid page frame number to put the object + * @lpfn: last valid page frame number to put the object + * @mem_type: One of TTM_PL_* where the resource should be allocated from. + * @flags: memory domain and caching flags for the object + * + * Structure indicating a possible place to put an object. + */ +struct ttm_place { + unsigned fpfn; + unsigned lpfn; + uint32_t mem_type; + uint32_t flags; +}; + +/** + * struct ttm_placement + * + * @num_placement: number of preferred placements + * @placement: preferred placements + * @num_busy_placement: number of preferred placements when need to evict buffer + * @busy_placement: preferred placements when need to evict buffer + * + * Structure indicating the placement you request for an object. + */ +struct ttm_placement { + unsigned num_placement; + const struct ttm_place *placement; + unsigned num_busy_placement; + const struct ttm_place *busy_placement; +}; + +#endif diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h new file mode 100644 index 000000000..ef09b23d2 --- /dev/null +++ b/include/drm/ttm/ttm_pool.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ + +#ifndef _TTM_PAGE_POOL_H_ +#define _TTM_PAGE_POOL_H_ + +#include <linux/mmzone.h> +#include <linux/llist.h> +#include <linux/spinlock.h> +#include <drm/ttm/ttm_caching.h> + +struct device; +struct ttm_tt; +struct ttm_pool; +struct ttm_operation_ctx; + +/** + * struct ttm_pool_type - Pool for a certain memory type + * + * @pool: the pool we belong to, might be NULL for the global ones + * @order: the allocation order our pages have + * @caching: the caching type our pages have + * @shrinker_list: our place on the global shrinker list + * @lock: protection of the page list + * @pages: the list of pages in the pool + */ +struct ttm_pool_type { + struct ttm_pool *pool; + unsigned int order; + enum ttm_caching caching; + + struct list_head shrinker_list; + + spinlock_t lock; + struct list_head pages; +}; + +/** + * struct ttm_pool - Pool for all caching and orders + * + * @dev: the device we allocate pages for + * @use_dma_alloc: if coherent DMA allocations should be used + * @use_dma32: if GFP_DMA32 should be used + * @caching: pools for each caching/order + */ +struct ttm_pool { + struct device *dev; + + bool use_dma_alloc; + bool use_dma32; + + struct { + struct ttm_pool_type orders[MAX_ORDER]; + } caching[TTM_NUM_CACHING_TYPES]; +}; + +int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, + struct ttm_operation_ctx *ctx); +void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt); + +void ttm_pool_init(struct ttm_pool *pool, struct device *dev, + bool use_dma_alloc, bool use_dma32); +void ttm_pool_fini(struct ttm_pool *pool); + +int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m); + +int ttm_pool_mgr_init(unsigned long num_pages); +void ttm_pool_mgr_fini(void); + +#endif diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h new file mode 100644 index 000000000..7963b957e --- /dev/null +++ b/include/drm/ttm/ttm_range_manager.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef _TTM_RANGE_MANAGER_H_ +#define _TTM_RANGE_MANAGER_H_ + +#include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_device.h> +#include <drm/drm_mm.h> + +/** + * struct ttm_range_mgr_node + * + * @base: base clase we extend + * @mm_nodes: MM nodes, usually 1 + * + * Extending the ttm_resource object to manage an address space allocation with + * one or more drm_mm_nodes. + */ +struct ttm_range_mgr_node { + struct ttm_resource base; + struct drm_mm_node mm_nodes[]; +}; + +/** + * to_ttm_range_mgr_node + * + * @res: the resource to upcast + * + * Upcast the ttm_resource object into a ttm_range_mgr_node object. 
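+ *
+ * Typically used from a range-manager callback to get back at the
+ * drm_mm_node(s) backing a resource (illustrative sketch; the manager's own
+ * lock is assumed to be held around the drm_mm manipulation):
+ *
+ *	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
+ *
+ *	if (drm_mm_node_allocated(&node->mm_nodes[0]))
+ *		drm_mm_remove_node(&node->mm_nodes[0]);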
+ */ +static inline struct ttm_range_mgr_node * +to_ttm_range_mgr_node(struct ttm_resource *res) +{ + return container_of(res, struct ttm_range_mgr_node, base); +} + +int ttm_range_man_init_nocheck(struct ttm_device *bdev, + unsigned type, bool use_tt, + unsigned long p_size); +int ttm_range_man_fini_nocheck(struct ttm_device *bdev, + unsigned type); +static __always_inline int ttm_range_man_init(struct ttm_device *bdev, + unsigned int type, bool use_tt, + unsigned long p_size) +{ + BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES); + return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size); +} + +static __always_inline int ttm_range_man_fini(struct ttm_device *bdev, + unsigned int type) +{ + BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES); + return ttm_range_man_fini_nocheck(bdev, type); +} +#endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h new file mode 100644 index 000000000..5afc6d664 --- /dev/null +++ b/include/drm/ttm/ttm_resource.h @@ -0,0 +1,428 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#ifndef _TTM_RESOURCE_H_ +#define _TTM_RESOURCE_H_ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/iosys-map.h> +#include <linux/dma-fence.h> + +#include <drm/drm_print.h> +#include <drm/ttm/ttm_caching.h> +#include <drm/ttm/ttm_kmap_iter.h> + +#define TTM_MAX_BO_PRIORITY 4U +#define TTM_NUM_MEM_TYPES 8 + +struct ttm_device; +struct ttm_resource_manager; +struct ttm_resource; +struct ttm_place; +struct ttm_buffer_object; +struct ttm_placement; +struct iosys_map; +struct io_mapping; +struct sg_table; +struct scatterlist; + +struct ttm_resource_manager_func { + /** + * struct ttm_resource_manager_func member alloc + * + * @man: Pointer to a memory type manager. + * @bo: Pointer to the buffer object we're allocating space for. + * @place: Placement details. + * @res: Resulting pointer to the ttm_resource. + * + * This function should allocate space in the memory type managed + * by @man. Placement details if applicable are given by @place. If + * successful, a filled in ttm_resource object should be returned in + * @res. @res::start should be set to a value identifying the beginning + * of the range allocated, and the function should return zero. + * If the manager can't fulfill the request -ENOSPC should be returned. 
+ * If a system error occurred, preventing the request to be fulfilled, + * the function should return a negative error code. + * + * This function may not be called from within atomic context and needs + * to take care of its own locking to protect any data structures + * managing the space. + */ + int (*alloc)(struct ttm_resource_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource **res); + + /** + * struct ttm_resource_manager_func member free + * + * @man: Pointer to a memory type manager. + * @res: Pointer to a struct ttm_resource to be freed. + * + * This function frees memory type resources previously allocated. + * May not be called from within atomic context. + */ + void (*free)(struct ttm_resource_manager *man, + struct ttm_resource *res); + + /** + * struct ttm_resource_manager_func member intersects + * + * @man: Pointer to a memory type manager. + * @res: Pointer to a struct ttm_resource to be checked. + * @place: Placement to check against. + * @size: Size of the check. + * + * Test if @res intersects with @place + @size. Used to judge if + * evictions are valueable or not. + */ + bool (*intersects)(struct ttm_resource_manager *man, + struct ttm_resource *res, + const struct ttm_place *place, + size_t size); + + /** + * struct ttm_resource_manager_func member compatible + * + * @man: Pointer to a memory type manager. + * @res: Pointer to a struct ttm_resource to be checked. + * @place: Placement to check against. + * @size: Size of the check. + * + * Test if @res compatible with @place + @size. Used to check of + * the need to move the backing store or not. + */ + bool (*compatible)(struct ttm_resource_manager *man, + struct ttm_resource *res, + const struct ttm_place *place, + size_t size); + + /** + * struct ttm_resource_manager_func member debug + * + * @man: Pointer to a memory type manager. + * @printer: Prefix to be used in printout to identify the caller. + * + * This function is called to print out the state of the memory + * type manager to aid debugging of out-of-memory conditions. + * It may not be called from within atomic context. + */ + void (*debug)(struct ttm_resource_manager *man, + struct drm_printer *printer); +}; + +/** + * struct ttm_resource_manager + * + * @use_type: The memory type is enabled. + * @use_tt: If a TT object should be used for the backing store. + * @size: Size of the managed region. + * @bdev: ttm device this manager belongs to + * @func: structure pointer implementing the range manager. See above + * @move_lock: lock for move fence + * @move: The fence of the last pipelined move operation. + * @lru: The lru list for this memory type. + * + * This structure is used to identify and manage memory types for a device. + */ +struct ttm_resource_manager { + /* + * No protection. Constant from start. + */ + bool use_type; + bool use_tt; + struct ttm_device *bdev; + uint64_t size; + const struct ttm_resource_manager_func *func; + spinlock_t move_lock; + + /* + * Protected by @move_lock. + */ + struct dma_fence *move; + + /* + * Protected by the bdev->lru_lock. + */ + struct list_head lru[TTM_MAX_BO_PRIORITY]; + + /** + * @usage: How much of the resources are used, protected by the + * bdev->lru_lock. + */ + uint64_t usage; +}; + +/** + * struct ttm_bus_placement + * + * @addr: mapped virtual address + * @offset: physical addr + * @is_iomem: is this io memory ? + * @caching: See enum ttm_caching + * + * Structure indicating the bus placement of an object. 
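+ *
+ * Typically filled in from a driver's io_mem_reserve() callback, for example
+ * (illustrative sketch; the aperture base is driver specific):
+ *
+ *	mem->bus.offset = my_vram_aperture_base + (mem->start << PAGE_SHIFT);
+ *	mem->bus.is_iomem = true;
+ *	mem->bus.caching = ttm_write_combined;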
+ */
+struct ttm_bus_placement {
+	void *addr;
+	phys_addr_t offset;
+	bool is_iomem;
+	enum ttm_caching caching;
+};
+
+/**
+ * struct ttm_resource
+ *
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
+ * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
+ * @bo: weak reference to the BO, protected by ttm_device::lru_lock
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+struct ttm_resource {
+	unsigned long start;
+	unsigned long num_pages;
+	uint32_t mem_type;
+	uint32_t placement;
+	struct ttm_bus_placement bus;
+	struct ttm_buffer_object *bo;
+
+	/**
+	 * @lru: Least recently used list, see &ttm_resource_manager.lru
+	 */
+	struct list_head lru;
+};
+
+/**
+ * struct ttm_resource_cursor
+ *
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+	unsigned int priority;
+};
+
+/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first res in the bulk move range
+ * @last: last res in the bulk move range
+ *
+ * Range of resources for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+	struct ttm_resource *first;
+	struct ttm_resource *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @pos: first/last lru entry for resources in each domain/priority
+ *
+ * Container for the current bulk move state. Should be used with
+ * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
+ */
+struct ttm_lru_bulk_move {
+	struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+};
+
+/**
+ * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
+ * struct sg_table backed struct ttm_resource.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface.
+ * @iomap: struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ * @cache: Scatterlist traversal cache for fast lookups.
+ * @cache.sg: Pointer to the currently cached scatterlist segment.
+ * @cache.i: First index of @sg. PAGE_SIZE granularity.
+ * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity.
+ * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity.
+ */
+struct ttm_kmap_iter_iomap {
+	struct ttm_kmap_iter base;
+	struct io_mapping *iomap;
+	struct sg_table *st;
+	resource_size_t start;
+	struct {
+		struct scatterlist *sg;
+		pgoff_t i;
+		pgoff_t end;
+		pgoff_t offs;
+	} cache;
+};
+
+/**
+ * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io
+ * @base: The base iterator
+ * @dmap: Points to the starting address of the region
+ * @needs_unmap: Whether we need to unmap on fini
+ */
+struct ttm_kmap_iter_linear_io {
+	struct ttm_kmap_iter base;
+	struct iosys_map dmap;
+	bool needs_unmap;
+};
+
+/**
+ * ttm_resource_manager_set_used
+ *
+ * @man: A memory manager object.
+ * @used: usage state to set.
+ *
+ * Set the manager in use flag. If disabled the manager is no longer
+ * used for object placement.
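+ *
+ * As an editorial illustration (not part of the upstream header), the usual
+ * driver teardown sequence for a manager looks roughly like:
+ *
+ *   ttm_resource_manager_set_used(man, false);
+ *   ttm_resource_manager_evict_all(bdev, man);
+ *   ttm_resource_manager_cleanup(man);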
+ */
+static inline void
+ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
+{
+	int i;
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; i++)
+		WARN_ON(!list_empty(&man->lru[i]));
+	man->use_type = used;
+}
+
+/**
+ * ttm_resource_manager_used
+ *
+ * @man: Manager to get used state for
+ *
+ * Get the in use flag for a manager.
+ * Returns:
+ * true if used, false if not.
+ */
+static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
+{
+	return man->use_type;
+}
+
+/**
+ * ttm_resource_manager_cleanup
+ *
+ * @man: A memory manager object.
+ *
+ * Cleanup the move fences from the memory manager object.
+ */
+static inline void
+ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
+{
+	dma_fence_put(man->move);
+	man->move = NULL;
+}
+
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+				struct ttm_buffer_object *bo);
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+				struct ttm_buffer_object *bo);
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
+
+void ttm_resource_init(struct ttm_buffer_object *bo,
+		       const struct ttm_place *place,
+		       struct ttm_resource *res);
+void ttm_resource_fini(struct ttm_resource_manager *man,
+		       struct ttm_resource *res);
+
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+		       const struct ttm_place *place,
+		       struct ttm_resource **res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
+bool ttm_resource_intersects(struct ttm_device *bdev,
+			     struct ttm_resource *res,
+			     const struct ttm_place *place,
+			     size_t size);
+bool ttm_resource_compatible(struct ttm_device *bdev,
+			     struct ttm_resource *res,
+			     const struct ttm_place *place,
+			     size_t size);
+bool ttm_resource_compat(struct ttm_resource *res,
+			 struct ttm_placement *placement);
+void ttm_resource_set_bo(struct ttm_resource *res,
+			 struct ttm_buffer_object *bo);
+
+void ttm_resource_manager_init(struct ttm_resource_manager *man,
+			       struct ttm_device *bdev,
+			       uint64_t size);
+
+int ttm_resource_manager_evict_all(struct ttm_device *bdev,
+				   struct ttm_resource_manager *man);
+
+uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man);
+void ttm_resource_manager_debug(struct ttm_resource_manager *man,
+				struct drm_printer *p);
+
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+			   struct ttm_resource_cursor *cursor);
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+			  struct ttm_resource_cursor *cursor,
+			  struct ttm_resource *res);
+
+/**
+ * ttm_resource_manager_for_each_res - iterate over all resources
+ * @man: the resource manager
+ * @cursor: struct ttm_resource_cursor for the current position
+ * @res: the current resource
+ *
+ * Iterate over all the evictable resources in a resource manager.
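+ *
+ * As an editorial illustration (not part of the upstream header), with the
+ * LRU lock held this can be used roughly as:
+ *
+ *   struct ttm_resource_cursor cursor;
+ *   struct ttm_resource *res;
+ *
+ *   ttm_resource_manager_for_each_res(man, &cursor, res)
+ *           pr_info("start %lu, %lu pages\n", res->start, res->num_pages);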
+ */ +#define ttm_resource_manager_for_each_res(man, cursor, res) \ + for (res = ttm_resource_manager_first(man, cursor); res; \ + res = ttm_resource_manager_next(man, cursor, res)) + +struct ttm_kmap_iter * +ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io, + struct io_mapping *iomap, + struct sg_table *st, + resource_size_t start); + +struct ttm_kmap_iter_linear_io; + +struct ttm_kmap_iter * +ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, + struct ttm_device *bdev, + struct ttm_resource *mem); + +void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io, + struct ttm_device *bdev, + struct ttm_resource *mem); + +void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man, + struct dentry * parent, + const char *name); +#endif diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h new file mode 100644 index 000000000..b7d3f3843 --- /dev/null +++ b/include/drm/ttm/ttm_tt.h @@ -0,0 +1,250 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +#ifndef _TTM_TT_H_ +#define _TTM_TT_H_ + +#include <linux/pagemap.h> +#include <linux/types.h> +#include <drm/ttm/ttm_caching.h> +#include <drm/ttm/ttm_kmap_iter.h> + +struct ttm_device; +struct ttm_tt; +struct ttm_resource; +struct ttm_buffer_object; +struct ttm_operation_ctx; + +/** + * struct ttm_tt - This is a structure holding the pages, caching- and aperture + * binding status for a buffer object that isn't backed by fixed (VRAM / AGP) + * memory. + */ +struct ttm_tt { + /** @pages: Array of pages backing the data. */ + struct page **pages; + /** + * @page_flags: The page flags. + * + * Supported values: + * + * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated + * and swapped out by TTM. Calling ttm_tt_populate() will then swap the + * pages back in, and unset the flag. Drivers should in general never + * need to touch this. + * + * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on + * allocation. + * + * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated + * externally, like with dma-buf or userptr. This effectively disables + * TTM swapping out such pages. 
+ * It also prevents TTM from ever directly mapping these pages.
+ *
+ * Note that enum ttm_bo_type.ttm_bo_type_sg objects will always enable
+ * this flag.
+ *
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE: Same behaviour as
+ * TTM_TT_FLAG_EXTERNAL, but with the reduced restriction that it is
+ * still valid to use TTM to map the pages directly. This is useful when
+ * implementing a ttm_tt backend which still allocates driver-owned
+ * pages underneath (say with shmem).
+ *
+ * Note that since this also implies TTM_TT_FLAG_EXTERNAL, the usage
+ * here should always be:
+ *
+ *   page_flags = TTM_TT_FLAG_EXTERNAL |
+ *                TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ *
+ * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
+ * set by TTM after ttm_tt_populate() has successfully returned, and is
+ * then unset when TTM calls ttm_tt_unpopulate().
+ */
+#define TTM_TT_FLAG_SWAPPED		(1 << 0)
+#define TTM_TT_FLAG_ZERO_ALLOC		(1 << 1)
+#define TTM_TT_FLAG_EXTERNAL		(1 << 2)
+#define TTM_TT_FLAG_EXTERNAL_MAPPABLE	(1 << 3)
+
+#define TTM_TT_FLAG_PRIV_POPULATED	(1U << 31)
+	uint32_t page_flags;
+	/** @num_pages: Number of pages in the page array. */
+	uint32_t num_pages;
+	/** @sg: for SG objects via dma-buf. */
+	struct sg_table *sg;
+	/** @dma_address: The DMA (bus) addresses of the pages. */
+	dma_addr_t *dma_address;
+	/** @swap_storage: Pointer to shmem struct file for swap storage. */
+	struct file *swap_storage;
+	/**
+	 * @caching: The current caching state of the pages, see enum
+	 * ttm_caching.
+	 */
+	enum ttm_caching caching;
+};
+
+/**
+ * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface
+ * @tt: Cached struct ttm_tt.
+ * @prot: Cached page protection for mapping.
+ */
+struct ttm_kmap_iter_tt {
+	struct ttm_kmap_iter base;
+	struct ttm_tt *tt;
+	pgprot_t prot;
+};
+
+static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
+{
+	return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
+}
+
+/**
+ * ttm_tt_create
+ *
+ * @bo: pointer to a struct ttm_buffer_object
+ * @zero_alloc: true if allocated pages need to be zeroed
+ *
+ * Make sure we have a TTM structure allocated for the given BO.
+ * No pages are actually allocated.
+ */
+int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bo: The buffer object we create the ttm for.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
+ * @caching: the desired caching state of the pages
+ * @extra_pages: Extra pages needed for the driver.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * 0 on success, a negative error code (e.g. -ENOMEM) on failure.
+ */
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+		uint32_t page_flags, enum ttm_caching caching,
+		unsigned long extra_pages);
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+		   uint32_t page_flags, enum ttm_caching caching);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+void ttm_tt_fini(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @bdev: the ttm_device this object belongs to
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
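+ *
+ * As an editorial illustration (not part of the upstream header), drivers
+ * normally do not call this directly; TTM's populate path does roughly:
+ *
+ *   if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
+ *           ret = ttm_tt_swapin(ttm);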
+ */ +int ttm_tt_swapin(struct ttm_tt *ttm); +int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm, + gfp_t gfp_flags); + +/** + * ttm_tt_populate - allocate pages for a ttm + * + * @bdev: the ttm_device this object belongs to + * @ttm: Pointer to the ttm_tt structure + * @ctx: operation context for populating the tt object. + * + * Calls the driver method to allocate pages for a ttm + */ +int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); + +/** + * ttm_tt_unpopulate - free pages from a ttm + * + * @bdev: the ttm_device this object belongs to + * @ttm: Pointer to the ttm_tt structure + * + * Calls the driver method to free all pages from a ttm + */ +void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm); + +/** + * ttm_tt_mark_for_clear - Mark pages for clearing on populate. + * + * @ttm: Pointer to the ttm_tt structure + * + * Marks pages for clearing so that the next time the page vector is + * populated, the pages will be cleared. + */ +static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm) +{ + ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC; +} + +void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages); + +struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt, + struct ttm_tt *tt); + +#if IS_ENABLED(CONFIG_AGP) +#include <linux/agp_backend.h> + +/** + * ttm_agp_tt_create + * + * @bo: Buffer object we allocate the ttm for. + * @bridge: The agp bridge this device is sitting on. + * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags. + * + * + * Create a TTM backend that uses the indicated AGP bridge as an aperture + * for TT memory. This function uses the linux agpgart interface to + * bind and unbind memory backing a ttm_tt. + */ +struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, + struct agp_bridge_data *bridge, + uint32_t page_flags); +int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem); +void ttm_agp_unbind(struct ttm_tt *ttm); +void ttm_agp_destroy(struct ttm_tt *ttm); +bool ttm_agp_is_bound(struct ttm_tt *ttm); +#endif + +#endif |
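Editorial note: the two headers above (ttm_resource.h and ttm_tt.h) define the contract between TTM and a driver-side resource manager. The sketch below is an editorial illustration only, not part of the patch; the my_mgr_* names are hypothetical, while the TTM calls (ttm_resource_init/fini, ttm_resource_manager_init, ttm_resource_manager_set_used, ttm_set_driver_manager, TTM_PL_VRAM) are taken from the headers in this series. It shows how the ttm_resource_manager_func hooks documented above are typically wired up. Drivers that only need a simple linear range can instead use ttm_range_man_init() from ttm_range_manager.h.

#include <linux/container_of.h>
#include <linux/slab.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical node type wrapping the base ttm_resource. */
struct my_mgr_node {
	struct ttm_resource base;
};

/* .alloc hook: hand out the whole managed region as a single resource.
 * A real manager would track offsets and honour @place here.
 */
static int my_mgr_alloc(struct ttm_resource_manager *man,
			struct ttm_buffer_object *bo,
			const struct ttm_place *place,
			struct ttm_resource **res)
{
	struct my_mgr_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);
	*res = &node->base;
	return 0;
}

/* .free hook: undo ttm_resource_init() and release the node. */
static void my_mgr_free(struct ttm_resource_manager *man,
			struct ttm_resource *res)
{
	struct my_mgr_node *node = container_of(res, struct my_mgr_node, base);

	ttm_resource_fini(man, res);
	kfree(node);
}

static const struct ttm_resource_manager_func my_mgr_func = {
	.alloc = my_mgr_alloc,
	.free = my_mgr_free,
};

/* Registration, typically done once at device init time. */
static void my_mgr_register(struct ttm_device *bdev,
			    struct ttm_resource_manager *man, uint64_t size)
{
	ttm_resource_manager_init(man, bdev, size);
	man->func = &my_mgr_func;
	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
	ttm_resource_manager_set_used(man, true);
}

Teardown would follow the sequence shown in the ttm_resource_manager_set_used() documentation: disable the manager, evict everything, then clean up the move fence.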