Diffstat
108 files changed, 30404 insertions(+), 0 deletions(-)
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h new file mode 100644 index 000000000..8712e1499 --- /dev/null +++ b/include/drm/amd_asic_type.h @@ -0,0 +1,64 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMD_ASIC_TYPE_H__ +#define __AMD_ASIC_TYPE_H__ +/* + * Supported ASIC types + */ +enum amd_asic_type { + CHIP_TAHITI = 0, + CHIP_PITCAIRN, /* 1 */ + CHIP_VERDE, /* 2 */ + CHIP_OLAND, /* 3 */ + CHIP_HAINAN, /* 4 */ + CHIP_BONAIRE, /* 5 */ + CHIP_KAVERI, /* 6 */ + CHIP_KABINI, /* 7 */ + CHIP_HAWAII, /* 8 */ + CHIP_MULLINS, /* 9 */ + CHIP_TOPAZ, /* 10 */ + CHIP_TONGA, /* 11 */ + CHIP_FIJI, /* 12 */ + CHIP_CARRIZO, /* 13 */ + CHIP_STONEY, /* 14 */ + CHIP_POLARIS10, /* 15 */ + CHIP_POLARIS11, /* 16 */ + CHIP_POLARIS12, /* 17 */ + CHIP_VEGAM, /* 18 */ + CHIP_VEGA10, /* 19 */ + CHIP_VEGA12, /* 20 */ + CHIP_VEGA20, /* 21 */ + CHIP_RAVEN, /* 22 */ + CHIP_ARCTURUS, /* 23 */ + CHIP_RENOIR, /* 24 */ + CHIP_NAVI10, /* 25 */ + CHIP_NAVI14, /* 26 */ + CHIP_NAVI12, /* 27 */ + CHIP_SIENNA_CICHLID, /* 28 */ + CHIP_NAVY_FLOUNDER, /* 29 */ + CHIP_LAST, +}; + +extern const char *amdgpu_asic_name[]; + +#endif /*__AMD_ASIC_TYPE_H__ */ diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h new file mode 100644 index 000000000..b0dcc0733 --- /dev/null +++ b/include/drm/bridge/analogix_dp.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Analogix DP (Display Port) Core interface driver. + * + * Copyright (C) 2015 Rockchip Electronics Co., Ltd. 
+ */ +#ifndef _ANALOGIX_DP_H_ +#define _ANALOGIX_DP_H_ + +#include <drm/drm_crtc.h> + +struct analogix_dp_device; + +enum analogix_dp_devtype { + EXYNOS_DP, + RK3288_DP, + RK3399_EDP, +}; + +static inline bool is_rockchip(enum analogix_dp_devtype type) +{ + return type == RK3288_DP || type == RK3399_EDP; +} + +struct analogix_dp_plat_data { + enum analogix_dp_devtype dev_type; + struct drm_panel *panel; + struct drm_encoder *encoder; + struct drm_connector *connector; + bool skip_connector; + + int (*power_on_start)(struct analogix_dp_plat_data *); + int (*power_on_end)(struct analogix_dp_plat_data *); + int (*power_off)(struct analogix_dp_plat_data *); + int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *, + struct drm_connector *); + int (*get_modes)(struct analogix_dp_plat_data *, + struct drm_connector *); +}; + +int analogix_dp_resume(struct analogix_dp_device *dp); +int analogix_dp_suspend(struct analogix_dp_device *dp); + +struct analogix_dp_device * +analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data); +int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev); +void analogix_dp_unbind(struct analogix_dp_device *dp); +void analogix_dp_remove(struct analogix_dp_device *dp); + +int analogix_dp_start_crc(struct drm_connector *connector); +int analogix_dp_stop_crc(struct drm_connector *connector); + +#endif /* _ANALOGIX_DP_H_ */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h new file mode 100644 index 000000000..ea34ca146 --- /dev/null +++ b/include/drm/bridge/dw_hdmi.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2011 Freescale Semiconductor, Inc. + */ + +#ifndef __DW_HDMI__ +#define __DW_HDMI__ + +#include <sound/hdmi-codec.h> + +struct drm_display_info; +struct drm_display_mode; +struct drm_encoder; +struct dw_hdmi; +struct platform_device; + +/** + * DOC: Supported input formats and encodings + * + * Depending on the Hardware configuration of the Controller IP, it supports + * a subset of the following input formats and encodings on its internal + * 48bit bus. 
+ * + * +----------------------+----------------------------------+------------------------------+ + * | Format Name | Format Code | Encodings | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 8bit | ``MEDIA_BUS_FMT_RGB888_1X24`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 10bits | ``MEDIA_BUS_FMT_RGB101010_1X30`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 12bits | ``MEDIA_BUS_FMT_RGB121212_1X36`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | RGB 4:4:4 16bits | ``MEDIA_BUS_FMT_RGB161616_1X48`` | ``V4L2_YCBCR_ENC_DEFAULT`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 8bit | ``MEDIA_BUS_FMT_YUV8_1X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 10bits | ``MEDIA_BUS_FMT_YUV10_1X30`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 12bits | ``MEDIA_BUS_FMT_YUV12_1X36`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:4:4 16bits | ``MEDIA_BUS_FMT_YUV16_1X48`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * | | | or ``V4L2_YCBCR_ENC_XV601`` | + * | | | or ``V4L2_YCBCR_ENC_XV709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 8bit | ``MEDIA_BUS_FMT_UYVY8_1X16`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 10bits | ``MEDIA_BUS_FMT_UYVY10_1X20`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:2 12bits | ``MEDIA_BUS_FMT_UYVY12_1X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 8bit | ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` | ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 10bits | ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``| ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 12bits | ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``| ``V4L2_YCBCR_ENC_601`` | + * | | | or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + * | YCbCr 4:2:0 16bits | ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``| ``V4L2_YCBCR_ENC_601`` | + * | | | 
or ``V4L2_YCBCR_ENC_709`` | + * +----------------------+----------------------------------+------------------------------+ + */ + +enum { + DW_HDMI_RES_8, + DW_HDMI_RES_10, + DW_HDMI_RES_12, + DW_HDMI_RES_MAX, +}; + +enum dw_hdmi_phy_type { + DW_HDMI_PHY_DWC_HDMI_TX_PHY = 0x00, + DW_HDMI_PHY_DWC_MHL_PHY_HEAC = 0xb2, + DW_HDMI_PHY_DWC_MHL_PHY = 0xc2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC = 0xe2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY = 0xf2, + DW_HDMI_PHY_DWC_HDMI20_TX_PHY = 0xf3, + DW_HDMI_PHY_VENDOR_PHY = 0xfe, +}; + +struct dw_hdmi_mpll_config { + unsigned long mpixelclock; + struct { + u16 cpce; + u16 gmp; + } res[DW_HDMI_RES_MAX]; +}; + +struct dw_hdmi_curr_ctrl { + unsigned long mpixelclock; + u16 curr[DW_HDMI_RES_MAX]; +}; + +struct dw_hdmi_phy_config { + unsigned long mpixelclock; + u16 sym_ctr; /*clock symbol and transmitter control*/ + u16 term; /*transmission termination value*/ + u16 vlev_ctr; /* voltage level control */ +}; + +struct dw_hdmi_phy_ops { + int (*init)(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *display, + const struct drm_display_mode *mode); + void (*disable)(struct dw_hdmi *hdmi, void *data); + enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data); + void (*update_hpd)(struct dw_hdmi *hdmi, void *data, + bool force, bool disabled, bool rxsense); + void (*setup_hpd)(struct dw_hdmi *hdmi, void *data); +}; + +struct dw_hdmi_plat_data { + struct regmap *regm; + + unsigned long input_bus_encoding; + bool use_drm_infoframe; + bool ycbcr_420_allowed; + + /* + * Private data passed to all the .mode_valid() and .configure_phy() + * callback functions. + */ + void *priv_data; + + /* Platform-specific mode validation (optional). */ + enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, + const struct drm_display_info *info, + const struct drm_display_mode *mode); + + /* Vendor PHY support */ + const struct dw_hdmi_phy_ops *phy_ops; + const char *phy_name; + void *phy_data; + unsigned int phy_force_vendor; + + /* Synopsys PHY support */ + const struct dw_hdmi_mpll_config *mpll_cfg; + const struct dw_hdmi_curr_ctrl *cur_ctr; + const struct dw_hdmi_phy_config *phy_config; + int (*configure_phy)(struct dw_hdmi *hdmi, void *data, + unsigned long mpixelclock); +}; + +struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, + const struct dw_hdmi_plat_data *plat_data); +void dw_hdmi_remove(struct dw_hdmi *hdmi); +void dw_hdmi_unbind(struct dw_hdmi *hdmi); +struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_hdmi_plat_data *plat_data); + +void dw_hdmi_resume(struct dw_hdmi *hdmi); + +void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); + +int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, + struct device *codec_dev); +void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); +void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); +void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status); +void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca); +void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); +void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); +void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi, + const struct drm_display_info *display); + +/* PHY configuration */ +void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address); +void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, + unsigned char addr); + +void 
dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable); +void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable); +void dw_hdmi_phy_reset(struct dw_hdmi *hdmi); + +enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi, + void *data); +void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data, + bool force, bool disabled, bool rxsense); +void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data); + +#endif /* __IMX_HDMI_H__ */ diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h new file mode 100644 index 000000000..bda8aa7c2 --- /dev/null +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) STMicroelectronics SA 2017 + * + * Authors: Philippe Cornu <philippe.cornu@st.com> + * Yannick Fertre <yannick.fertre@st.com> + */ + +#ifndef __DW_MIPI_DSI__ +#define __DW_MIPI_DSI__ + +#include <linux/types.h> + +#include <drm/drm_modes.h> + +struct drm_display_mode; +struct drm_encoder; +struct dw_mipi_dsi; +struct mipi_dsi_device; +struct platform_device; + +struct dw_mipi_dsi_dphy_timing { + u16 data_hs2lp; + u16 data_lp2hs; + u16 clk_hs2lp; + u16 clk_lp2hs; +}; + +struct dw_mipi_dsi_phy_ops { + int (*init)(void *priv_data); + void (*power_on)(void *priv_data); + void (*power_off)(void *priv_data); + int (*get_lane_mbps)(void *priv_data, + const struct drm_display_mode *mode, + unsigned long mode_flags, u32 lanes, u32 format, + unsigned int *lane_mbps); + int (*get_timing)(void *priv_data, unsigned int lane_mbps, + struct dw_mipi_dsi_dphy_timing *timing); + int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate); +}; + +struct dw_mipi_dsi_host_ops { + int (*attach)(void *priv_data, + struct mipi_dsi_device *dsi); + int (*detach)(void *priv_data, + struct mipi_dsi_device *dsi); +}; + +struct dw_mipi_dsi_plat_data { + void __iomem *base; + unsigned int max_data_lanes; + + enum drm_mode_status (*mode_valid)(void *priv_data, + const struct drm_display_mode *mode); + + const struct dw_mipi_dsi_phy_ops *phy_ops; + const struct dw_mipi_dsi_host_ops *host_ops; + + void *priv_data; +}; + +struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev, + const struct dw_mipi_dsi_plat_data + *plat_data); +void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi); +int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder); +void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi); +void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave); + +#endif /* __DW_MIPI_DSI__ */ diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h new file mode 100644 index 000000000..d96626a0e --- /dev/null +++ b/include/drm/bridge/mhl.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Defines for Mobile High-Definition Link (MHL) interface + * + * Copyright (C) 2015, Samsung Electronics, Co., Ltd. + * Andrzej Hajda <a.hajda@samsung.com> + * + * Based on MHL driver for Android devices. + * Copyright (C) 2013-2014 Silicon Image, Inc. 
+ */ + +#ifndef __MHL_H__ +#define __MHL_H__ + +#include <linux/types.h> + +/* Device Capabilities Registers */ +enum { + MHL_DCAP_DEV_STATE, + MHL_DCAP_MHL_VERSION, + MHL_DCAP_CAT, + MHL_DCAP_ADOPTER_ID_H, + MHL_DCAP_ADOPTER_ID_L, + MHL_DCAP_VID_LINK_MODE, + MHL_DCAP_AUD_LINK_MODE, + MHL_DCAP_VIDEO_TYPE, + MHL_DCAP_LOG_DEV_MAP, + MHL_DCAP_BANDWIDTH, + MHL_DCAP_FEATURE_FLAG, + MHL_DCAP_DEVICE_ID_H, + MHL_DCAP_DEVICE_ID_L, + MHL_DCAP_SCRATCHPAD_SIZE, + MHL_DCAP_INT_STAT_SIZE, + MHL_DCAP_RESERVED, + MHL_DCAP_SIZE +}; + +#define MHL_DCAP_CAT_SINK 0x01 +#define MHL_DCAP_CAT_SOURCE 0x02 +#define MHL_DCAP_CAT_POWER 0x10 +#define MHL_DCAP_CAT_PLIM(x) ((x) << 5) + +#define MHL_DCAP_VID_LINK_RGB444 0x01 +#define MHL_DCAP_VID_LINK_YCBCR444 0x02 +#define MHL_DCAP_VID_LINK_YCBCR422 0x04 +#define MHL_DCAP_VID_LINK_PPIXEL 0x08 +#define MHL_DCAP_VID_LINK_ISLANDS 0x10 +#define MHL_DCAP_VID_LINK_VGA 0x20 +#define MHL_DCAP_VID_LINK_16BPP 0x40 + +#define MHL_DCAP_AUD_LINK_2CH 0x01 +#define MHL_DCAP_AUD_LINK_8CH 0x02 + +#define MHL_DCAP_VT_GRAPHICS 0x00 +#define MHL_DCAP_VT_PHOTO 0x02 +#define MHL_DCAP_VT_CINEMA 0x04 +#define MHL_DCAP_VT_GAMES 0x08 +#define MHL_DCAP_SUPP_VT 0x80 + +#define MHL_DCAP_LD_DISPLAY 0x01 +#define MHL_DCAP_LD_VIDEO 0x02 +#define MHL_DCAP_LD_AUDIO 0x04 +#define MHL_DCAP_LD_MEDIA 0x08 +#define MHL_DCAP_LD_TUNER 0x10 +#define MHL_DCAP_LD_RECORD 0x20 +#define MHL_DCAP_LD_SPEAKER 0x40 +#define MHL_DCAP_LD_GUI 0x80 +#define MHL_DCAP_LD_ALL 0xFF + +#define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01 +#define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02 +#define MHL_DCAP_FEATURE_SP_SUPPORT 0x04 +#define MHL_DCAP_FEATURE_UCP_SEND_SUPPOR 0x08 +#define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10 +#define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40 + +/* Extended Device Capabilities Registers */ +enum { + MHL_XDC_ECBUS_SPEEDS, + MHL_XDC_TMDS_SPEEDS, + MHL_XDC_ECBUS_ROLES, + MHL_XDC_LOG_DEV_MAPX, + MHL_XDC_SIZE +}; + +#define MHL_XDC_ECBUS_S_075 0x01 +#define MHL_XDC_ECBUS_S_8BIT 0x02 +#define MHL_XDC_ECBUS_S_12BIT 0x04 +#define MHL_XDC_ECBUS_D_150 0x10 +#define MHL_XDC_ECBUS_D_8BIT 0x20 + +#define MHL_XDC_TMDS_000 0x00 +#define MHL_XDC_TMDS_150 0x01 +#define MHL_XDC_TMDS_300 0x02 +#define MHL_XDC_TMDS_600 0x04 + +/* MHL_XDC_ECBUS_ROLES flags */ +#define MHL_XDC_DEV_HOST 0x01 +#define MHL_XDC_DEV_DEVICE 0x02 +#define MHL_XDC_DEV_CHARGER 0x04 +#define MHL_XDC_HID_HOST 0x08 +#define MHL_XDC_HID_DEVICE 0x10 + +/* MHL_XDC_LOG_DEV_MAPX flags */ +#define MHL_XDC_LD_PHONE 0x01 + +/* Device Status Registers */ +enum { + MHL_DST_CONNECTED_RDY, + MHL_DST_LINK_MODE, + MHL_DST_VERSION, + MHL_DST_SIZE +}; + +/* Offset of DEVSTAT registers */ +#define MHL_DST_OFFSET 0x30 +#define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name) + +#define MHL_DST_CONN_DCAP_RDY 0x01 +#define MHL_DST_CONN_XDEVCAPP_SUPP 0x02 +#define MHL_DST_CONN_POW_STAT 0x04 +#define MHL_DST_CONN_PLIM_STAT_MASK 0x38 + +#define MHL_DST_LM_CLK_MODE_MASK 0x07 +#define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02 +#define MHL_DST_LM_CLK_MODE_NORMAL 0x03 +#define MHL_DST_LM_PATH_EN_MASK 0x08 +#define MHL_DST_LM_PATH_ENABLED 0x08 +#define MHL_DST_LM_PATH_DISABLED 0x00 +#define MHL_DST_LM_MUTED_MASK 0x10 + +/* Extended Device Status Registers */ +enum { + MHL_XDS_CURR_ECBUS_MODE, + MHL_XDS_AVLINK_MODE_STATUS, + MHL_XDS_AVLINK_MODE_CONTROL, + MHL_XDS_MULTI_SINK_STATUS, + MHL_XDS_SIZE +}; + +/* Offset of XDEVSTAT registers */ +#define MHL_XDS_OFFSET 0x90 +#define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name) + +/* MHL_XDS_REG_CURR_ECBUS_MODE flags */ +#define MHL_XDS_SLOT_MODE_8BIT 0x00 
+#define MHL_XDS_SLOT_MODE_6BIT 0x01 +#define MHL_XDS_ECBUS_S 0x04 +#define MHL_XDS_ECBUS_D 0x08 + +#define MHL_XDS_LINK_CLOCK_75MHZ 0x00 +#define MHL_XDS_LINK_CLOCK_150MHZ 0x10 +#define MHL_XDS_LINK_CLOCK_300MHZ 0x20 +#define MHL_XDS_LINK_CLOCK_600MHZ 0x30 + +#define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00 +#define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01 +#define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02 +#define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03 + +#define MHL_XDS_LINK_RATE_1_5_GBPS 0x00 +#define MHL_XDS_LINK_RATE_3_0_GBPS 0x01 +#define MHL_XDS_LINK_RATE_6_0_GBPS 0x02 +#define MHL_XDS_ATT_CAPABLE 0x08 + +#define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01 +#define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04 +#define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10 +#define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00 +#define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40 + +/* Interrupt Registers */ +enum { + MHL_INT_RCHANGE, + MHL_INT_DCHANGE, + MHL_INT_SIZE +}; + +/* Offset of DEVSTAT registers */ +#define MHL_INT_OFFSET 0x20 +#define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name) + +#define MHL_INT_RC_DCAP_CHG 0x01 +#define MHL_INT_RC_DSCR_CHG 0x02 +#define MHL_INT_RC_REQ_WRT 0x04 +#define MHL_INT_RC_GRT_WRT 0x08 +#define MHL_INT_RC_3D_REQ 0x10 +#define MHL_INT_RC_FEAT_REQ 0x20 +#define MHL_INT_RC_FEAT_COMPLETE 0x40 + +#define MHL_INT_DC_EDID_CHG 0x02 + +enum { + MHL_ACK = 0x33, /* Command or Data byte acknowledge */ + MHL_NACK = 0x34, /* Command or Data byte not acknowledge */ + MHL_ABORT = 0x35, /* Transaction abort */ + MHL_WRITE_STAT = 0xe0, /* Write one status register */ + MHL_SET_INT = 0x60, /* Write one interrupt register */ + MHL_READ_DEVCAP_REG = 0x61, /* Read one register */ + MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */ + MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */ + MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */ + MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */ + MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */ + MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */ + MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */ + MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */ + MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */ + MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */ + MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */ + MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */ + MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */ + MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */ + /* let the rest of these float, they are software specific */ + MHL_READ_EDID_BLOCK, + MHL_SEND_3D_REQ_OR_FEAT_REQ, + MHL_READ_DEVCAP, + MHL_READ_XDEVCAP +}; + +/* MSC message types */ +enum { + MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */ + MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */ + MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */ + MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */ + MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */ + MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */ + MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */ + MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */ + MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */ + MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */ + MHL_MSC_MSG_UCPE = 0x32, 
/* UCP Error sub-command */ + MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */ + MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */ + MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */ + MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */ + MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */ + MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */ + MHL_MSC_MSG_BIST_TRIGGER = 0x60, + MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61, + MHL_MSC_MSG_BIST_READY = 0x62, + MHL_MSC_MSG_BIST_STOP = 0x63, +}; + +/* RAP action codes */ +#define MHL_RAP_POLL 0x00 /* Just do an ack */ +#define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */ +#define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */ +#define MHL_RAP_CBUS_MODE_DOWN 0x20 +#define MHL_RAP_CBUS_MODE_UP 0x21 + +/* RAPK status codes */ +#define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */ +#define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */ +#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */ +#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */ + +/* Bit masks for RCP messages */ +#define MHL_RCP_KEY_RELEASED_MASK 0x80 +#define MHL_RCP_KEY_ID_MASK 0x7F + +/* + * Error status codes for RCPE messages + */ +/* No error. (Not allowed in RCPE messages) */ +#define MHL_RCPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized key code */ +#define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 +/* Responder busy. Initiator may retry message */ +#define MHL_RCPE_STATUS_BUSY 0x02 + +/* + * Error status codes for RBPE messages + */ +/* No error. (Not allowed in RBPE messages) */ +#define MHL_RBPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized button code */ +#define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01 +/* Responder busy. Initiator may retry message */ +#define MHL_RBPE_STATUS_BUSY 0x02 + +/* + * Error status codes for UCPE messages + */ +/* No error. 
(Not allowed in UCPE messages) */ +#define MHL_UCPE_STATUS_NO_ERROR 0x00 +/* Unsupported/unrecognized key code */ +#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 + +enum mhl_burst_id { + MHL_BURST_ID_3D_VIC = 0x10, + MHL_BURST_ID_3D_DTD = 0x11, + MHL_BURST_ID_HEV_VIC = 0x20, + MHL_BURST_ID_HEV_DTDA = 0x21, + MHL_BURST_ID_HEV_DTDB = 0x22, + MHL_BURST_ID_VC_ASSIGN = 0x38, + MHL_BURST_ID_VC_CONFIRM = 0x39, + MHL_BURST_ID_AUD_DELAY = 0x40, + MHL_BURST_ID_ADT_BURSTID = 0x41, + MHL_BURST_ID_BIST_SETUP = 0x51, + MHL_BURST_ID_BIST_RETURN_STAT = 0x52, + MHL_BURST_ID_EMSC_SUPPORT = 0x61, + MHL_BURST_ID_HID_PAYLOAD = 0x62, + MHL_BURST_ID_BLK_RCV_BUFFER_INFO = 0x63, + MHL_BURST_ID_BITS_PER_PIXEL_FMT = 0x64, +}; + +struct mhl_burst_blk_rcv_buffer_info { + __be16 id; + __le16 size; +} __packed; + +struct mhl3_burst_header { + __be16 id; + u8 checksum; + u8 total_entries; + u8 sequence_index; +} __packed; + +struct mhl_burst_bits_per_pixel_fmt { + struct mhl3_burst_header hdr; + u8 num_entries; + struct { + u8 stream_id; + u8 pixel_format; + } __packed desc[]; +} __packed; + +struct mhl_burst_emsc_support { + struct mhl3_burst_header hdr; + u8 num_entries; + __be16 burst_id[]; +} __packed; + +struct mhl_burst_audio_descr { + struct mhl3_burst_header hdr; + u8 flags; + u8 short_desc[9]; +} __packed; + +/* + * MHL3 infoframe related definitions + */ + +#define MHL3_IEEE_OUI 0x7ca61d +#define MHL3_INFOFRAME_SIZE 15 + +enum mhl3_video_format { + MHL3_VIDEO_FORMAT_NONE, + MHL3_VIDEO_FORMAT_3D, + MHL3_VIDEO_FORMAT_MULTI_VIEW, + MHL3_VIDEO_FORMAT_DUAL_3D +}; + +enum mhl3_3d_format_type { + MHL3_3D_FORMAT_TYPE_FS, /* frame sequential */ + MHL3_3D_FORMAT_TYPE_TB, /* top-bottom */ + MHL3_3D_FORMAT_TYPE_LR, /* left-right */ + MHL3_3D_FORMAT_TYPE_FS_TB, /* frame sequential, top-bottom */ + MHL3_3D_FORMAT_TYPE_FS_LR, /* frame sequential, left-right */ + MHL3_3D_FORMAT_TYPE_TB_LR /* top-bottom, left-right */ +}; + +struct mhl3_infoframe { + unsigned char version; + enum mhl3_video_format video_format; + enum mhl3_3d_format_type format_type; + bool sep_audio; + int hev_format; + int av_delay; +}; + +#endif /* __MHL_H__ */ diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h new file mode 100644 index 000000000..664e120b9 --- /dev/null +++ b/include/drm/drm_agpsupport.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DRM_AGPSUPPORT_H_ +#define _DRM_AGPSUPPORT_H_ + +#include <linux/agp_backend.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <uapi/drm/drm.h> + +struct drm_device; +struct drm_file; + +struct drm_agp_head { + struct agp_kern_info agp_info; + struct list_head memory; + unsigned long mode; + struct agp_bridge_data *bridge; + int enabled; + int acquired; + unsigned long base; + int agp_mtrr; + int cant_use_aperture; + unsigned long page_mask; +}; + +#if IS_ENABLED(CONFIG_AGP) + +void drm_free_agp(struct agp_memory * handle, int pages); +int drm_bind_agp(struct agp_memory * handle, unsigned int start); +int drm_unbind_agp(struct agp_memory * handle); + +struct drm_agp_head *drm_agp_init(struct drm_device *dev); +void drm_legacy_agp_clear(struct drm_device *dev); +int drm_agp_acquire(struct drm_device *dev); +int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_release(struct drm_device *dev); +int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_enable(struct drm_device 
*dev, struct drm_agp_mode mode); +int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); +int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); +int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); +int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#else /* CONFIG_AGP */ + +static inline void drm_free_agp(struct agp_memory * handle, int pages) +{ +} + +static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start) +{ + return -ENODEV; +} + +static inline int drm_unbind_agp(struct agp_memory * handle) +{ + return -ENODEV; +} + +static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) +{ + return NULL; +} + +static inline void drm_legacy_agp_clear(struct drm_device *dev) +{ +} + +static inline int drm_agp_acquire(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_agp_release(struct drm_device *dev) +{ + return -ENODEV; +} + +static inline int drm_agp_enable(struct drm_device *dev, + struct drm_agp_mode mode) +{ + return -ENODEV; +} + +static inline int drm_agp_info(struct drm_device *dev, + struct drm_agp_info *info) +{ + return -ENODEV; +} + +static inline int drm_agp_alloc(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_agp_free(struct drm_device *dev, + struct drm_agp_buffer *request) +{ + return -ENODEV; +} + +static inline int drm_agp_unbind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +static inline int drm_agp_bind(struct drm_device *dev, + struct drm_agp_binding *request) +{ + return -ENODEV; +} + +#endif /* CONFIG_AGP */ + +#endif /* _DRM_AGPSUPPORT_H_ */ diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h new file mode 100644 index 000000000..d07c851d2 --- /dev/null +++ b/include/drm/drm_atomic.h @@ -0,0 +1,1071 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_H_ +#define DRM_ATOMIC_H_ + +#include <drm/drm_crtc.h> +#include <drm/drm_util.h> + +/** + * struct drm_crtc_commit - track modeset commits on a CRTC + * + * This structure is used to track pending modeset changes and atomic commit on + * a per-CRTC basis. Since updating the list should never block, this structure + * is reference counted to allow waiters to safely wait on an event to complete, + * without holding any locks. + * + * It has 3 different events in total to allow a fine-grained synchronization + * between outstanding updates:: + * + * atomic commit thread hardware + * + * write new state into hardware ----> ... + * signal hw_done + * switch to new state on next + * ... v/hblank + * + * wait for buffers to show up ... + * + * ... send completion irq + * irq handler signals flip_done + * cleanup old buffers + * + * signal cleanup_done + * + * wait for flip_done <---- + * clean up atomic state + * + * The important bit to know is that &cleanup_done is the terminal event, but the + * ordering between &flip_done and &hw_done is entirely up to the specific driver + * and modeset state change. + * + * For an implementation of how to use this look at + * drm_atomic_helper_setup_commit() from the atomic helper library. + */ +struct drm_crtc_commit { + /** + * @crtc: + * + * DRM CRTC for this commit. + */ + struct drm_crtc *crtc; + + /** + * @ref: + * + * Reference count for this structure. Needed to allow blocking on + * completions without the risk of the completion disappearing + * meanwhile. + */ + struct kref ref; + + /** + * @flip_done: + * + * Will be signaled when the hardware has flipped to the new set of + * buffers. Signals at the same time as when the drm event for this + * commit is sent to userspace, or when an out-fence is singalled. Note + * that for most hardware, in most cases this happens after @hw_done is + * signalled. + * + * Completion of this stage is signalled implicitly by calling + * drm_crtc_send_vblank_event() on &drm_crtc_state.event. + */ + struct completion flip_done; + + /** + * @hw_done: + * + * Will be signalled when all hw register changes for this commit have + * been written out. Especially when disabling a pipe this can be much + * later than @flip_done, since that can signal already when the + * screen goes black, whereas to fully shut down a pipe more register + * I/O is required. + * + * Note that this does not need to include separately reference-counted + * resources like backing storage buffer pinning, or runtime pm + * management. + * + * Drivers should call drm_atomic_helper_commit_hw_done() to signal + * completion of this stage. + */ + struct completion hw_done; + + /** + * @cleanup_done: + * + * Will be signalled after old buffers have been cleaned up by calling + * drm_atomic_helper_cleanup_planes(). Since this can only happen after + * a vblank wait completed it might be a bit later. This completion is + * useful to throttle updates and avoid hardware updates getting ahead + * of the buffer cleanup too much. + * + * Drivers should call drm_atomic_helper_commit_cleanup_done() to signal + * completion of this stage. 
+ */ + struct completion cleanup_done; + + /** + * @commit_entry: + * + * Entry on the per-CRTC &drm_crtc.commit_list. Protected by + * $drm_crtc.commit_lock. + */ + struct list_head commit_entry; + + /** + * @event: + * + * &drm_pending_vblank_event pointer to clean up private events. + */ + struct drm_pending_vblank_event *event; + + /** + * @abort_completion: + * + * A flag that's set after drm_atomic_helper_setup_commit() takes a + * second reference for the completion of $drm_crtc_state.event. It's + * used by the free code to remove the second reference if commit fails. + */ + bool abort_completion; +}; + +struct __drm_planes_state { + struct drm_plane *ptr; + struct drm_plane_state *state, *old_state, *new_state; +}; + +struct __drm_crtcs_state { + struct drm_crtc *ptr; + struct drm_crtc_state *state, *old_state, *new_state; + + /** + * @commit: + * + * A reference to the CRTC commit object that is kept for use by + * drm_atomic_helper_wait_for_flip_done() after + * drm_atomic_helper_commit_hw_done() is called. This ensures that a + * concurrent commit won't free a commit object that is still in use. + */ + struct drm_crtc_commit *commit; + + s32 __user *out_fence_ptr; + u64 last_vblank_count; +}; + +struct __drm_connnectors_state { + struct drm_connector *ptr; + struct drm_connector_state *state, *old_state, *new_state; + /** + * @out_fence_ptr: + * + * User-provided pointer which the kernel uses to return a sync_file + * file descriptor. Used by writeback connectors to signal completion of + * the writeback. + */ + s32 __user *out_fence_ptr; +}; + +struct drm_private_obj; +struct drm_private_state; + +/** + * struct drm_private_state_funcs - atomic state functions for private objects + * + * These hooks are used by atomic helpers to create, swap and destroy states of + * private objects. The structure itself is used as a vtable to identify the + * associated private object type. Each private object type that needs to be + * added to the atomic states is expected to have an implementation of these + * hooks and pass a pointer to its drm_private_state_funcs struct to + * drm_atomic_get_private_obj_state(). + */ +struct drm_private_state_funcs { + /** + * @atomic_duplicate_state: + * + * Duplicate the current state of the private object and return it. It + * is an error to call this before obj->state has been initialized. + * + * RETURNS: + * + * Duplicated atomic state or NULL when obj->state is not + * initialized or allocation failed. + */ + struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj); + + /** + * @atomic_destroy_state: + * + * Frees the private object state created with @atomic_duplicate_state. + */ + void (*atomic_destroy_state)(struct drm_private_obj *obj, + struct drm_private_state *state); +}; + +/** + * struct drm_private_obj - base struct for driver private atomic object + * + * A driver private object is initialized by calling + * drm_atomic_private_obj_init() and cleaned up by calling + * drm_atomic_private_obj_fini(). + * + * Currently only tracks the state update functions and the opaque driver + * private state itself, but in the future might also track which + * &drm_modeset_lock is required to duplicate and update this object's state. + * + * All private objects must be initialized before the DRM device they are + * attached to is registered to the DRM subsystem (call to drm_dev_register()) + * and should stay around until this DRM device is unregistered (call to + * drm_dev_unregister()). 
In other words, private objects lifetime is tied + * to the DRM device lifetime. This implies that: + * + * 1/ all calls to drm_atomic_private_obj_init() must be done before calling + * drm_dev_register() + * 2/ all calls to drm_atomic_private_obj_fini() must be done after calling + * drm_dev_unregister() + */ +struct drm_private_obj { + /** + * @head: List entry used to attach a private object to a &drm_device + * (queued to &drm_mode_config.privobj_list). + */ + struct list_head head; + + /** + * @lock: Modeset lock to protect the state object. + */ + struct drm_modeset_lock lock; + + /** + * @state: Current atomic state for this driver private object. + */ + struct drm_private_state *state; + + /** + * @funcs: + * + * Functions to manipulate the state of this driver private object, see + * &drm_private_state_funcs. + */ + const struct drm_private_state_funcs *funcs; +}; + +/** + * drm_for_each_privobj() - private object iterator + * + * @privobj: pointer to the current private object. Updated after each + * iteration + * @dev: the DRM device we want get private objects from + * + * Allows one to iterate over all private objects attached to @dev + */ +#define drm_for_each_privobj(privobj, dev) \ + list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head) + +/** + * struct drm_private_state - base struct for driver private object state + * @state: backpointer to global drm_atomic_state + * + * Currently only contains a backpointer to the overall atomic update, but in + * the future also might hold synchronization information similar to e.g. + * &drm_crtc.commit. + */ +struct drm_private_state { + struct drm_atomic_state *state; +}; + +struct __drm_private_objs_state { + struct drm_private_obj *ptr; + struct drm_private_state *state, *old_state, *new_state; +}; + +/** + * struct drm_atomic_state - the global state object for atomic updates + * @ref: count of all references to this state (will not be freed until zero) + * @dev: parent DRM device + * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics + * @async_update: hint for asynchronous plane update + * @planes: pointer to array of structures with per-plane data + * @crtcs: pointer to array of CRTC pointers + * @num_connector: size of the @connectors and @connector_states arrays + * @connectors: pointer to array of structures with per-connector data + * @num_private_objs: size of the @private_objs array + * @private_objs: pointer to array of private object pointers + * @acquire_ctx: acquire context for this atomic modeset state update + * + * States are added to an atomic update by calling drm_atomic_get_crtc_state(), + * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for + * private state structures, drm_atomic_get_private_obj_state(). + */ +struct drm_atomic_state { + struct kref ref; + + struct drm_device *dev; + + /** + * @allow_modeset: + * + * Allow full modeset. This is used by the ATOMIC IOCTL handler to + * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should + * never consult this flag, instead looking at the output of + * drm_atomic_crtc_needs_modeset(). + */ + bool allow_modeset : 1; + bool legacy_cursor_update : 1; + bool async_update : 1; + /** + * @duplicated: + * + * Indicates whether or not this atomic state was duplicated using + * drm_atomic_helper_duplicate_state(). Drivers and atomic helpers + * should use this to fixup normal inconsistencies in duplicated + * states. 
+ */ + bool duplicated : 1; + struct __drm_planes_state *planes; + struct __drm_crtcs_state *crtcs; + int num_connector; + struct __drm_connnectors_state *connectors; + int num_private_objs; + struct __drm_private_objs_state *private_objs; + + struct drm_modeset_acquire_ctx *acquire_ctx; + + /** + * @fake_commit: + * + * Used for signaling unbound planes/connectors. + * When a connector or plane is not bound to any CRTC, it's still important + * to preserve linearity to prevent the atomic states from being freed to early. + * + * This commit (if set) is not bound to any CRTC, but will be completed when + * drm_atomic_helper_commit_hw_done() is called. + */ + struct drm_crtc_commit *fake_commit; + + /** + * @commit_work: + * + * Work item which can be used by the driver or helpers to execute the + * commit without blocking. + */ + struct work_struct commit_work; +}; + +void __drm_crtc_commit_free(struct kref *kref); + +/** + * drm_crtc_commit_get - acquire a reference to the CRTC commit + * @commit: CRTC commit + * + * Increases the reference of @commit. + * + * Returns: + * The pointer to @commit, with reference increased. + */ +static inline struct drm_crtc_commit *drm_crtc_commit_get(struct drm_crtc_commit *commit) +{ + kref_get(&commit->ref); + return commit; +} + +/** + * drm_crtc_commit_put - release a reference to the CRTC commmit + * @commit: CRTC commit + * + * This releases a reference to @commit which is freed after removing the + * final reference. No locking required and callable from any context. + */ +static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) +{ + kref_put(&commit->ref, __drm_crtc_commit_free); +} + +struct drm_atomic_state * __must_check +drm_atomic_state_alloc(struct drm_device *dev); +void drm_atomic_state_clear(struct drm_atomic_state *state); + +/** + * drm_atomic_state_get - acquire a reference to the atomic state + * @state: The atomic state + * + * Returns a new reference to the @state + */ +static inline struct drm_atomic_state * +drm_atomic_state_get(struct drm_atomic_state *state) +{ + kref_get(&state->ref); + return state; +} + +void __drm_atomic_state_free(struct kref *ref); + +/** + * drm_atomic_state_put - release a reference to the atomic state + * @state: The atomic state + * + * This releases a reference to @state which is freed after removing the + * final reference. No locking required and callable from any context. 
+ */ +static inline void drm_atomic_state_put(struct drm_atomic_state *state) +{ + kref_put(&state->ref, __drm_atomic_state_free); +} + +int __must_check +drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state); +void drm_atomic_state_default_clear(struct drm_atomic_state *state); +void drm_atomic_state_default_release(struct drm_atomic_state *state); + +struct drm_crtc_state * __must_check +drm_atomic_get_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc); +struct drm_plane_state * __must_check +drm_atomic_get_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane); +struct drm_connector_state * __must_check +drm_atomic_get_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector); + +void drm_atomic_private_obj_init(struct drm_device *dev, + struct drm_private_obj *obj, + struct drm_private_state *state, + const struct drm_private_state_funcs *funcs); +void drm_atomic_private_obj_fini(struct drm_private_obj *obj); + +struct drm_private_state * __must_check +drm_atomic_get_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj); +struct drm_private_state * +drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj); +struct drm_private_state * +drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj); + +struct drm_connector * +drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); +struct drm_connector * +drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); + +/** + * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists + * @state: global atomic state object + * @crtc: CRTC to grab + * + * This function returns the CRTC state for the given CRTC, or NULL + * if the CRTC is not part of the global atomic state. + * + * This function is deprecated, @drm_atomic_get_old_crtc_state or + * @drm_atomic_get_new_crtc_state should be used instead. + */ +static inline struct drm_crtc_state * +drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].state; +} + +/** + * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists + * @state: global atomic state object + * @crtc: CRTC to grab + * + * This function returns the old CRTC state for the given CRTC, or + * NULL if the CRTC is not part of the global atomic state. + */ +static inline struct drm_crtc_state * +drm_atomic_get_old_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].old_state; +} +/** + * drm_atomic_get_new_crtc_state - get new CRTC state, if it exists + * @state: global atomic state object + * @crtc: CRTC to grab + * + * This function returns the new CRTC state for the given CRTC, or + * NULL if the CRTC is not part of the global atomic state. + */ +static inline struct drm_crtc_state * +drm_atomic_get_new_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtcs[drm_crtc_index(crtc)].new_state; +} + +/** + * drm_atomic_get_existing_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the plane state for the given plane, or NULL + * if the plane is not part of the global atomic state. 
+ * + * This function is deprecated, @drm_atomic_get_old_plane_state or + * @drm_atomic_get_new_plane_state should be used instead. + */ +static inline struct drm_plane_state * +drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].state; +} + +/** + * drm_atomic_get_old_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the old plane state for the given plane, or + * NULL if the plane is not part of the global atomic state. + */ +static inline struct drm_plane_state * +drm_atomic_get_old_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].old_state; +} + +/** + * drm_atomic_get_new_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the new plane state for the given plane, or + * NULL if the plane is not part of the global atomic state. + */ +static inline struct drm_plane_state * +drm_atomic_get_new_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->planes[drm_plane_index(plane)].new_state; +} + +/** + * drm_atomic_get_existing_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. + * + * This function is deprecated, @drm_atomic_get_old_connector_state or + * @drm_atomic_get_new_connector_state should be used instead. + */ +static inline struct drm_connector_state * +drm_atomic_get_existing_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].state; +} + +/** + * drm_atomic_get_old_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the old connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. + */ +static inline struct drm_connector_state * +drm_atomic_get_old_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].old_state; +} + +/** + * drm_atomic_get_new_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the new connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. + */ +static inline struct drm_connector_state * +drm_atomic_get_new_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connectors[index].new_state; +} + +/** + * __drm_atomic_get_current_plane_state - get current plane state + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the plane state for the given plane, either from + * @state, or if the plane isn't part of the atomic state update, from @plane. 
+ * This is useful in atomic check callbacks, when drivers need to peek at, but + * not change, state of other planes, since it avoids threading an error code + * back up the call chain. + * + * WARNING: + * + * Note that this function is in general unsafe since it doesn't check for the + * required locking for access state structures. Drivers must ensure that it is + * safe to access the returned state structure through other means. One common + * example is when planes are fixed to a single CRTC, and the driver knows that + * the CRTC lock is held already. In that case holding the CRTC lock gives a + * read-lock on all planes connected to that CRTC. But if planes can be + * reassigned things get more tricky. In that case it's better to use + * drm_atomic_get_plane_state and wire up full error handling. + * + * Returns: + * + * Read-only pointer to the current plane state. + */ +static inline const struct drm_plane_state * +__drm_atomic_get_current_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + if (state->planes[drm_plane_index(plane)].state) + return state->planes[drm_plane_index(plane)].state; + + return plane->state; +} + +int __must_check +drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, + struct drm_encoder *encoder); +int __must_check +drm_atomic_add_affected_connectors(struct drm_atomic_state *state, + struct drm_crtc *crtc); +int __must_check +drm_atomic_add_affected_planes(struct drm_atomic_state *state, + struct drm_crtc *crtc); + +int __must_check drm_atomic_check_only(struct drm_atomic_state *state); +int __must_check drm_atomic_commit(struct drm_atomic_state *state); +int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); + +void drm_state_dump(struct drm_device *dev, struct drm_printer *p); + +/** + * for_each_oldnew_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @old_connector_state: &struct drm_connector_state iteration cursor for the + * old state + * @new_connector_state: &struct drm_connector_state iteration cursor for the + * new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. + */ +#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ + (old_connector_state) = (__state)->connectors[__i].old_state, \ + (new_connector_state) = (__state)->connectors[__i].new_state, 1)) + +/** + * for_each_old_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @old_connector_state: &struct drm_connector_state iteration cursor for the + * old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. 
+ */ +#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ + (old_connector_state) = (__state)->connectors[__i].old_state, 1)) + +/** + * for_each_new_connector_in_state - iterate over all connectors in an atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @new_connector_state: &struct drm_connector_state iteration cursor for the + * new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ + (new_connector_state) = (__state)->connectors[__i].new_state, \ + (void)(new_connector_state) /* Only to avoid unused-but-set-variable warning */, 1)) + +/** + * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state + * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. + */ +#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ + (old_crtc_state) = (__state)->crtcs[__i].old_state, \ + (void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \ + (new_crtc_state) = (__state)->crtcs[__i].new_state, 1)) + +/** + * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. 
+ */ +#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (old_crtc_state) = (__state)->crtcs[__i].old_state, 1)) + +/** + * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc; \ + (__i)++) \ + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ + (new_crtc_state) = (__state)->crtcs[__i].new_state, \ + (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) + +/** + * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking both old and + * new state. This is useful in places where the state delta needs to be + * considered, for example in atomic check functions. + */ +#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ + (old_plane_state) = (__state)->planes[__i].old_state,\ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** + * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic + * update in reverse order + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update in reverse order, + * tracking both old and new state. This is useful in places where the + * state delta needs to be considered, for example in atomic check functions. 
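+ *
+ * An illustrative sketch (not taken from a real driver) that walks the planes
+ * from the highest index down and notes whether any framebuffer changes::
+ *
+ *     struct drm_plane *plane;
+ *     struct drm_plane_state *old_plane_state, *new_plane_state;
+ *     bool fb_changed = false;
+ *     int i;
+ *
+ *     for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state,
+ *                                            new_plane_state, i) {
+ *             if (old_plane_state->fb != new_plane_state->fb)
+ *                     fb_changed = true;
+ *     }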
+ */ +#define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \ + for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ + (__i) >= 0; \ + (__i)--) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state,\ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) + +/** + * for_each_old_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @old_plane_state: &struct drm_plane_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking only the old + * state. This is useful in disable functions, where we need the old state the + * hardware is still in. + */ +#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state, 1)) +/** + * for_each_new_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @new_plane_state: &struct drm_plane_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update, tracking only the new + * state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ + (__i)++) \ + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ + (new_plane_state) = (__state)->planes[__i].new_state, \ + (void)(new_plane_state) /* Only to avoid unused-but-set-variable warning */, 1)) + +/** + * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state + * @new_obj_state: &struct drm_private_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking both + * old and new state. This is useful in places where the state delta needs + * to be considered, for example in atomic check functions. 
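+ *
+ * A rough sketch (struct foo_priv_state and foo_check_state() are hypothetical
+ * driver constructs; foo_priv_state is assumed to embed &drm_private_state as
+ * its @base member)::
+ *
+ *     struct drm_private_obj *obj;
+ *     struct drm_private_state *old_s, *new_s;
+ *     int i;
+ *
+ *     for_each_oldnew_private_obj_in_state(state, obj, old_s, new_s, i) {
+ *             struct foo_priv_state *new_foo =
+ *                     container_of(new_s, struct foo_priv_state, base);
+ *
+ *             foo_check_state(new_foo);
+ *     }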
+ */ +#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) + +/** + * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking only + * the old state. This is useful in disable functions, where we need the old + * state the hardware is still in. + */ +#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ + (__i)++) + +/** + * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @new_obj_state: &struct drm_private_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking only + * the new state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. + */ +#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) + +/** + * drm_atomic_crtc_needs_modeset - compute combined modeset need + * @state: &drm_crtc_state for the CRTC + * + * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track + * whether the state CRTC changed enough to need a full modeset cycle: + * mode_changed, active_changed and connectors_changed. This helper simply + * combines these three to compute the overall need for a modeset for @state. + * + * The atomic helper code sets these booleans, but drivers can and should + * change them appropriately to accurately represent whether a modeset is + * really needed. In general, drivers should avoid full modesets whenever + * possible. + * + * For example if the CRTC mode has changed, and the hardware is able to enact + * the requested mode change without going through a full modeset, the driver + * should clear mode_changed in its &drm_mode_config_funcs.atomic_check + * implementation. + */ +static inline bool +drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) +{ + return state->mode_changed || state->active_changed || + state->connectors_changed; +} + +/** + * drm_atomic_crtc_effectively_active - compute whether CRTC is actually active + * @state: &drm_crtc_state for the CRTC + * + * When in self refresh mode, the crtc_state->active value will be false, since + * the CRTC is off. However in some cases we're interested in whether the CRTC + * is active, or effectively active (ie: it's connected to an active display). + * In these cases, use this function instead of just checking active. 
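+ *
+ * A minimal sketch (the foo_* helper is hypothetical, not part of this header)
+ * of using it when deciding whether a display link may really be powered off::
+ *
+ *     static bool foo_can_power_off_link(const struct drm_crtc_state *crtc_state)
+ *     {
+ *             return !drm_atomic_crtc_effectively_active(crtc_state);
+ *     }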
+ */
+static inline bool
+drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state)
+{
+ return state->active || state->self_refresh_active;
+}
+
+/**
+ * struct drm_bus_cfg - bus configuration
+ *
+ * This structure stores the configuration of a physical bus between two
+ * components in an output pipeline, usually between two bridges, an encoder
+ * and a bridge, or a bridge and a connector.
+ *
+ * The bus configuration is stored in &drm_bridge_state separately for the
+ * input and output buses, as seen from the point of view of each bridge. The
+ * bus configuration of a bridge output is usually identical to the
+ * configuration of the next bridge's input, but may differ if the signals are
+ * modified between the two bridges, for instance by an inverter on the board.
+ * The input and output configurations of a bridge may differ if the bridge
+ * modifies the signals internally, for instance by performing format
+ * conversion, or by modifying signal polarities.
+ */
+struct drm_bus_cfg {
+ /**
+ * @format: format used on this bus (one of the MEDIA_BUS_FMT_* formats)
+ *
+ * This field should not be directly modified by drivers
+ * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus
+ * format negotiation).
+ */
+ u32 format;
+
+ /**
+ * @flags: DRM_BUS_* flags used on this bus
+ */
+ u32 flags;
+};
+
+/**
+ * struct drm_bridge_state - Atomic bridge state object
+ */
+struct drm_bridge_state {
+ /**
+ * @base: inherit from &drm_private_state
+ */
+ struct drm_private_state base;
+
+ /**
+ * @bridge: the bridge this state refers to
+ */
+ struct drm_bridge *bridge;
+
+ /**
+ * @input_bus_cfg: input bus configuration
+ */
+ struct drm_bus_cfg input_bus_cfg;
+
+ /**
+ * @output_bus_cfg: output bus configuration
+ */
+ struct drm_bus_cfg output_bus_cfg;
+};
+
+static inline struct drm_bridge_state *
+drm_priv_to_bridge_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct drm_bridge_state, base);
+}
+
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+
+#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
new file mode 100644
index 000000000..85df04c8e
--- /dev/null
+++ b/include/drm/drm_atomic_helper.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_HELPER_H_ +#define DRM_ATOMIC_HELPER_H_ + +#include <drm/drm_crtc.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_modeset_helper.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_util.h> + +struct drm_atomic_state; +struct drm_private_obj; +struct drm_private_state; + +int drm_atomic_helper_check_modeset(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, + const struct drm_crtc_state *crtc_state, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled); +int drm_atomic_helper_check_planes(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_atomic_helper_check(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); +void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state); +int drm_atomic_helper_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock); +int drm_atomic_helper_async_check(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_async_commit(struct drm_device *dev, + struct drm_atomic_state *state); + +int drm_atomic_helper_wait_for_fences(struct drm_device *dev, + struct drm_atomic_state *state, + bool pre_swap); + +void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void +drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void +drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state); + +void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, + struct drm_atomic_state *old_state); + +int drm_atomic_helper_prepare_planes(struct drm_device *dev, + struct drm_atomic_state *state); + +#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0) +#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1) + +void drm_atomic_helper_commit_planes(struct drm_device *dev, + struct drm_atomic_state *state, + uint32_t flags); +void drm_atomic_helper_cleanup_planes(struct drm_device *dev, + struct drm_atomic_state *old_state); +void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); +void +drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, + bool atomic); + +int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall); + +/* nonblocking commit helpers */ +int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, + bool nonblock); +void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state); +void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state); +void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state); +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state); + +/* implementations for legacy interfaces 
*/
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_disable_plane(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx);
+
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
+void drm_atomic_helper_shutdown(struct drm_device *dev);
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
+struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
+int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_resume(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_page_flip_target(
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags,
+ uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx);
+
+/**
+ * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
+ * @plane: the loop cursor
+ * @crtc: the CRTC whose planes are iterated
+ *
+ * This iterates over the current state, useful (for example) when applying
+ * atomic state after it has been checked and swapped. To iterate over the
+ * planes which *will* be attached (more useful in code called from
+ * &drm_mode_config_funcs.atomic_check) see
+ * drm_atomic_crtc_state_for_each_plane().
+ */
+#define drm_atomic_crtc_for_each_plane(plane, crtc) \
+ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
+
+/**
+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @crtc_state: the incoming CRTC state
+ *
+ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that
+ * will be attached if the specified state is applied. Useful, for example, in
+ * code called from &drm_mode_config_funcs.atomic_check operations, to
+ * validate the incoming state.
+ */
+#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+
+/**
+ * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @plane_state: loop cursor for the plane's state, must be const
+ * @crtc_state: the incoming CRTC state
+ *
+ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that
+ * will be attached if the specified state is applied. Useful, for example, in
+ * code called from &drm_mode_config_funcs.atomic_check operations, to
+ * validate the incoming state.
+ *
+ * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
+ * const plane_state. This is useful when a driver just wants to peek at other
+ * active planes on this CRTC, but does not need to change them.
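+ *
+ * A usage sketch (illustrative only) counting the planes that would actually
+ * scan out a framebuffer with this CRTC state::
+ *
+ *     struct drm_plane *plane;
+ *     const struct drm_plane_state *plane_state;
+ *     unsigned int active_planes = 0;
+ *
+ *     drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ *             if (plane_state->fb)
+ *                     active_planes++;
+ *     }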
+ */ +#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \ + drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \ + for_each_if ((plane_state = \ + __drm_atomic_get_current_plane_state((crtc_state)->state, \ + plane))) + +/** + * drm_atomic_plane_disabling - check whether a plane is being disabled + * @old_plane_state: old atomic plane state + * @new_plane_state: new atomic plane state + * + * Checks the atomic state of a plane to determine whether it's being disabled + * or not. This also WARNs if it detects an invalid state (both CRTC and FB + * need to either both be NULL or both be non-NULL). + * + * RETURNS: + * True if the plane is being disabled, false otherwise. + */ +static inline bool +drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state, + struct drm_plane_state *new_plane_state) +{ + /* + * When disabling a plane, CRTC and FB should always be NULL together. + * Anything else should be considered a bug in the atomic core, so we + * gently warn about it. + */ + WARN_ON((new_plane_state->crtc == NULL && new_plane_state->fb != NULL) || + (new_plane_state->crtc != NULL && new_plane_state->fb == NULL)); + + return old_plane_state->crtc && !new_plane_state->crtc; +} + +u32 * +drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + +#endif /* DRM_ATOMIC_HELPER_H_ */ diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h new file mode 100644 index 000000000..3f8f1d627 --- /dev/null +++ b/include/drm/drm_atomic_state_helper.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2018 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#include <linux/types.h> + +struct drm_bridge; +struct drm_bridge_state; +struct drm_crtc; +struct drm_crtc_state; +struct drm_plane; +struct drm_plane_state; +struct drm_connector; +struct drm_connector_state; +struct drm_private_obj; +struct drm_private_state; +struct drm_modeset_acquire_ctx; +struct drm_device; + +void __drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *state, + struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc, + struct drm_crtc_state *state); +void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); +struct drm_crtc_state * +drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state); +void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); + +void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *state, + struct drm_plane *plane); +void __drm_atomic_helper_plane_reset(struct drm_plane *plane, + struct drm_plane_state *state); +void drm_atomic_helper_plane_reset(struct drm_plane *plane); +void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, + struct drm_plane_state *state); +struct drm_plane_state * +drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); +void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state); +void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state); + +void __drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state, + struct drm_connector *connector); +void __drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state); +void drm_atomic_helper_connector_reset(struct drm_connector *connector); +void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector); +void +__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, + struct drm_connector_state *state); +struct drm_connector_state * +drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); +void +__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); +void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state); +void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, + struct drm_private_state *state); + +void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge, + struct drm_bridge_state *state); +struct drm_bridge_state * +drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge); +void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge, + struct drm_bridge_state *state); +void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge, + struct drm_bridge_state *state); +struct drm_bridge_state * +drm_atomic_helper_bridge_reset(struct drm_bridge *bridge); diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h new file mode 100644 index 000000000..8cec52ad1 --- /dev/null +++ b/include/drm/drm_atomic_uapi.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * Copyright (C) 2018 Intel Corp. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_UAPI_H_ +#define DRM_ATOMIC_UAPI_H_ + +struct drm_crtc_state; +struct drm_display_mode; +struct drm_property_blob; +struct drm_plane_state; +struct drm_crtc; +struct drm_connector_state; +struct dma_fence; +struct drm_framebuffer; + +int __must_check +drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, + const struct drm_display_mode *mode); +int __must_check +drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, + struct drm_property_blob *blob); +int __must_check +drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + struct drm_crtc *crtc); +void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, + struct drm_framebuffer *fb); +void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, + struct dma_fence *fence); +int __must_check +drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, + struct drm_crtc *crtc); + +#endif diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h new file mode 100644 index 000000000..0d36bfd1a --- /dev/null +++ b/include/drm/drm_audio_component.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: MIT +// Copyright © 2014 Intel Corporation + +#ifndef _DRM_AUDIO_COMPONENT_H_ +#define _DRM_AUDIO_COMPONENT_H_ + +struct drm_audio_component; +struct device; + +/** + * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver + */ +struct drm_audio_component_ops { + /** + * @owner: drm module to pin down + */ + struct module *owner; + /** + * @get_power: get the POWER_DOMAIN_AUDIO power well + * + * Request the power well to be turned on. + * + * Returns a wakeref cookie to be passed back to the corresponding + * call to @put_power. + */ + unsigned long (*get_power)(struct device *); + /** + * @put_power: put the POWER_DOMAIN_AUDIO power well + * + * Allow the power well to be turned off. + */ + void (*put_power)(struct device *, unsigned long); + /** + * @codec_wake_override: Enable/disable codec wake signal + */ + void (*codec_wake_override)(struct device *, bool enable); + /** + * @get_cdclk_freq: Get the Core Display Clock in kHz + */ + int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. 
After audio driver sets the + * sample rate, it will call this function to set n/cts + */ + int (*sync_audio_rate)(struct device *, int port, int pipe, int rate); + /** + * @get_eld: fill the audio state and ELD bytes for the given port + * + * Called from audio driver to get the HDMI/DP audio state of the given + * digital port, and also fetch ELD bytes to the given pointer. + * + * It returns the byte size of the original ELD (not the actually + * copied size), zero for an invalid ELD, or a negative error code. + * + * Note that the returned size may be over @max_bytes. Then it + * implies that only a part of ELD has been copied to the buffer. + */ + int (*get_eld)(struct device *, int port, int pipe, bool *enabled, + unsigned char *buf, int max_bytes); +}; + +/** + * struct drm_audio_component_audio_ops - Ops implemented by hda driver, called by DRM driver + */ +struct drm_audio_component_audio_ops { + /** + * @audio_ptr: Pointer to be used in call to pin_eld_notify + */ + void *audio_ptr; + /** + * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed + * + * Called when the DRM driver has set up audio pipeline or has just + * begun to tear it down. This allows the HDA driver to update its + * status accordingly (even when the HDA controller is in power save + * mode). + */ + void (*pin_eld_notify)(void *audio_ptr, int port, int pipe); + /** + * @pin2port: Check and convert from pin node to port number + * + * Called by HDA driver to check and convert from the pin widget node + * number to a port number in the graphics side. + */ + int (*pin2port)(void *audio_ptr, int pin); + /** + * @master_bind: (Optional) component master bind callback + * + * Called at binding master component, for HDA codec-specific + * handling of dynamic binding. + */ + int (*master_bind)(struct device *dev, struct drm_audio_component *); + /** + * @master_unbind: (Optional) component master unbind callback + * + * Called at unbinding master component, for HDA codec-specific + * handling of dynamic unbinding. 
+ */ + void (*master_unbind)(struct device *dev, struct drm_audio_component *); +}; + +/** + * struct drm_audio_component - Used for direct communication between DRM and hda drivers + */ +struct drm_audio_component { + /** + * @dev: DRM device, used as parameter for ops + */ + struct device *dev; + /** + * @ops: Ops implemented by DRM driver, called by hda driver + */ + const struct drm_audio_component_ops *ops; + /** + * @audio_ops: Ops implemented by hda driver, called by DRM driver + */ + const struct drm_audio_component_audio_ops *audio_ops; + /** + * @master_bind_complete: completion held during component master binding + */ + struct completion master_bind_complete; +}; + +#endif /* _DRM_AUDIO_COMPONENT_H_ */ diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h new file mode 100644 index 000000000..f99d3417f --- /dev/null +++ b/include/drm/drm_auth.h @@ -0,0 +1,116 @@ +#ifndef _DRM_AUTH_H_ +#define _DRM_AUTH_H_ + +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 2016 Intel Corporation + * + * Author: Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/idr.h> +#include <linux/kref.h> +#include <linux/wait.h> + +struct drm_file; +struct drm_hw_lock; + +/* + * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for + * include ordering reasons. + * + * DO NOT USE. + */ +struct drm_lock_data { + struct drm_hw_lock *hw_lock; + struct drm_file *file_priv; + wait_queue_head_t lock_queue; + unsigned long lock_time; + spinlock_t spinlock; + uint32_t kernel_waiters; + uint32_t user_waiters; + int idle_has_lock; +}; + +/** + * struct drm_master - drm master structure + * + * @refcount: Refcount for this master object. + * @dev: Link back to the DRM device + * @driver_priv: Pointer to driver-private information. + * @lessor: Lease holder + * @lessee_id: id for lessees. Owners always have id 0 + * @lessee_list: other lessees of the same master + * @lessees: drm_masters leasing from this one + * @leases: Objects leased to this drm_master. + * @lessee_idr: All lessees under this owner (only used where lessor == NULL) + * + * Note that master structures are only relevant for the legacy/primary device + * nodes, hence there can only be one per device, not one per drm_minor. + */ +struct drm_master { + struct kref refcount; + struct drm_device *dev; + /** + * @unique: Unique identifier: e.g. busid. 
Protected by + * &drm_device.master_mutex. + */ + char *unique; + /** + * @unique_len: Length of unique field. Protected by + * &drm_device.master_mutex. + */ + int unique_len; + /** + * @magic_map: Map of used authentication tokens. Protected by + * &drm_device.master_mutex. + */ + struct idr magic_map; + void *driver_priv; + + /* Tree of display resource leases, each of which is a drm_master struct + * All of these get activated simultaneously, so drm_device master points + * at the top of the tree (for which lessor is NULL). Protected by + * &drm_device.mode_config.idr_mutex. + */ + + struct drm_master *lessor; + int lessee_id; + struct list_head lessee_list; + struct list_head lessees; + struct idr leases; + struct idr lessee_idr; + /* private: */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) + struct drm_lock_data lock; +#endif +}; + +struct drm_master *drm_master_get(struct drm_master *master); +struct drm_master *drm_file_get_master(struct drm_file *file_priv); +void drm_master_put(struct drm_master **master); +bool drm_is_current_master(struct drm_file *fpriv); + +struct drm_master *drm_master_create(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h new file mode 100644 index 000000000..88bdfec3b --- /dev/null +++ b/include/drm/drm_blend.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef __DRM_BLEND_H__ +#define __DRM_BLEND_H__ + +#include <linux/list.h> +#include <linux/ctype.h> +#include <drm/drm_mode.h> + +#define DRM_MODE_BLEND_PREMULTI 0 +#define DRM_MODE_BLEND_COVERAGE 1 +#define DRM_MODE_BLEND_PIXEL_NONE 2 + +struct drm_device; +struct drm_atomic_state; +struct drm_plane; + +static inline bool drm_rotation_90_or_270(unsigned int rotation) +{ + return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270); +} + +#define DRM_BLEND_ALPHA_OPAQUE 0xffff + +int drm_plane_create_alpha_property(struct drm_plane *plane); +int drm_plane_create_rotation_property(struct drm_plane *plane, + unsigned int rotation, + unsigned int supported_rotations); +unsigned int drm_rotation_simplify(unsigned int rotation, + unsigned int supported_rotations); + +int drm_plane_create_zpos_property(struct drm_plane *plane, + unsigned int zpos, + unsigned int min, unsigned int max); +int drm_plane_create_zpos_immutable_property(struct drm_plane *plane, + unsigned int zpos); +int drm_atomic_normalize_zpos(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_plane_create_blend_mode_property(struct drm_plane *plane, + unsigned int supported_modes); +#endif diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h new file mode 100644 index 000000000..3826cf955 --- /dev/null +++ b/include/drm/drm_bridge.h @@ -0,0 +1,897 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_BRIDGE_H__ +#define __DRM_BRIDGE_H__ + +#include <linux/ctype.h> +#include <linux/list.h> +#include <linux/mutex.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_encoder.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_modes.h> + +struct drm_bridge; +struct drm_bridge_timings; +struct drm_connector; +struct drm_display_info; +struct drm_panel; +struct edid; +struct i2c_adapter; + +/** + * enum drm_bridge_attach_flags - Flags for &drm_bridge_funcs.attach + */ +enum drm_bridge_attach_flags { + /** + * @DRM_BRIDGE_ATTACH_NO_CONNECTOR: When this flag is set the bridge + * shall not create a drm_connector. + */ + DRM_BRIDGE_ATTACH_NO_CONNECTOR = BIT(0), +}; + +/** + * struct drm_bridge_funcs - drm_bridge control functions + */ +struct drm_bridge_funcs { + /** + * @attach: + * + * This callback is invoked whenever our bridge is being attached to a + * &drm_encoder. 
The flags argument tunes the behaviour of the attach
+ * operation (see DRM_BRIDGE_ATTACH_*).
+ *
+ * The @attach callback is optional.
+ *
+ * RETURNS:
+ *
+ * Zero on success, error code on failure.
+ */
+ int (*attach)(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags);
+
+ /**
+ * @detach:
+ *
+ * This callback is invoked whenever our bridge is being detached from a
+ * &drm_encoder.
+ *
+ * The @detach callback is optional.
+ */
+ void (*detach)(struct drm_bridge *bridge);
+
+ /**
+ * @mode_valid:
+ *
+ * This callback is used to check if a specific mode is valid in this
+ * bridge. This should be implemented if the bridge has some sort of
+ * restriction on the modes it can display. For example, a given bridge
+ * may be responsible for setting a clock value. If the clock cannot
+ * produce all the values for the available modes then this callback
+ * can be used to restrict the number of modes to only the ones that
+ * can be displayed.
+ *
+ * This hook is used by the probe helpers to filter the mode list in
+ * drm_helper_probe_single_connector_modes(), and it is used by the
+ * atomic helpers to validate modes supplied by userspace in
+ * drm_atomic_helper_check_modeset().
+ *
+ * The @mode_valid callback is optional.
+ *
+ * NOTE:
+ *
+ * Since this function is called both from the check phase of an atomic
+ * commit and from the mode validation in the probe paths, it is not
+ * allowed to look at anything else but the passed-in mode, and must
+ * validate it against configuration-invariant hardware constraints. Any
+ * further limits which depend upon the configuration can only be
+ * checked in @mode_fixup.
+ *
+ * RETURNS:
+ *
+ * drm_mode_status enum
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode);
+
+ /**
+ * @mode_fixup:
+ *
+ * This callback is used to validate and adjust a mode. The parameter
+ * mode is the display mode that should be fed to the next element in
+ * the display chain, either the final &drm_connector or the next
+ * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
+ * requires. It can be modified by this callback and does not need to
+ * match mode. See also &drm_crtc_state.adjusted_mode for more details.
+ *
+ * This is the only hook that allows a bridge to reject a modeset. If
+ * this function passes, all other callbacks must succeed for this
+ * configuration.
+ *
+ * The mode_fixup callback is optional. &drm_bridge_funcs.mode_fixup()
+ * is not called when &drm_bridge_funcs.atomic_check() is implemented,
+ * so only one of them should be provided.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of atomic modesets, which
+ * can be aborted for any reason (including on userspace's request to
+ * just check whether a configuration would be possible). Drivers MUST
+ * NOT touch any persistent state (hardware or software) or data
+ * structures except the passed-in parameters.
+ *
+ * Also beware that userspace can request its own custom modes; neither
+ * core nor helpers filter modes to the list of probe modes reported by
+ * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure
+ * that modes are filtered consistently, put any bridge constraints and
+ * limit checks into @mode_valid.
+ *
+ * RETURNS:
+ *
+ * True if an acceptable configuration is possible, false if the modeset
+ * operation should be rejected.
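+ *
+ * A minimal sketch of such a hook (hypothetical bridge driver; the
+ * positive-sync requirement is only an assumed example)::
+ *
+ *     static bool foo_bridge_mode_fixup(struct drm_bridge *bridge,
+ *                                       const struct drm_display_mode *mode,
+ *                                       struct drm_display_mode *adjusted_mode)
+ *     {
+ *             // this (assumed) bridge only accepts positive sync pulses on
+ *             // its input, so force the polarities on the adjusted mode
+ *             adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ *             adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
+ *             return true;
+ *     }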
+ */ + bool (*mode_fixup)(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + /** + * @disable: + * + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @disable vfunc. If the preceding element is a &drm_encoder + * it's called right before the &drm_encoder_helper_funcs.disable, + * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms + * hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is still running when this callback is called. + * + * The @disable callback is optional. + */ + void (*disable)(struct drm_bridge *bridge); + + /** + * @post_disable: + * + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @post_disable function. If the preceding element is a &drm_encoder + * it's called right after the encoder's + * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare + * or &drm_encoder_helper_funcs.dpms hook. + * + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. + * + * The @post_disable callback is optional. + */ + void (*post_disable)(struct drm_bridge *bridge); + + /** + * @mode_set: + * + * This callback should set the given mode on the bridge. It is called + * after the @mode_set callback for the preceding element in the display + * pipeline has been called already. If the bridge is the first element + * then this would be &drm_encoder_helper_funcs.mode_set. The display + * pipe (i.e. clocks and timing signals) is off when this function is + * called. + * + * The adjusted_mode parameter is the mode output by the CRTC for the + * first bridge in the chain. It can be different from the mode + * parameter that contains the desired mode for the connector at the end + * of the bridges chain, for instance when the first bridge in the chain + * performs scaling. The adjusted mode is mostly useful for the first + * bridge in the chain and is likely irrelevant for the other bridges. + * + * For atomic drivers the adjusted_mode is the mode stored in + * &drm_crtc_state.adjusted_mode. + * + * NOTE: + * + * If a need arises to store and access modes adjusted for other + * locations than the connection between the CRTC and the first bridge, + * the DRM framework will have to be extended with DRM bridge states. + */ + void (*mode_set)(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode); + /** + * @pre_enable: + * + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @pre_enable function. If the preceding element is a + * &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. + * + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. 
The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. + * + * The @pre_enable callback is optional. + */ + void (*pre_enable)(struct drm_bridge *bridge); + + /** + * @enable: + * + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's @enable function. If the preceding element is a + * &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is running when this callback is called. This + * callback must enable the display link feeding the next bridge in the + * chain if there is one. + * + * The @enable callback is optional. + */ + void (*enable)(struct drm_bridge *bridge); + + /** + * @atomic_pre_enable: + * + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @atomic_pre_enable or @pre_enable function. If the preceding + * element is a &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.atomic_enable hook. + * + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from + * &drm_bridge_chain_pre_enable. It would be prudent to also provide an + * implementation of @pre_enable if you are expecting driver calls into + * &drm_bridge_chain_pre_enable. + * + * The @atomic_pre_enable callback is optional. + */ + void (*atomic_pre_enable)(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state); + + /** + * @atomic_enable: + * + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's @atomic_enable or @enable function. If the preceding element + * is a &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.atomic_enable hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is running when this callback is called. This + * callback must enable the display link feeding the next bridge in the + * chain if there is one. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from &drm_bridge_chain_enable. + * It would be prudent to also provide an implementation of @enable if + * you are expecting driver calls into &drm_bridge_chain_enable. + * + * The @atomic_enable callback is optional. + */ + void (*atomic_enable)(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state); + /** + * @atomic_disable: + * + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. 
If the + * preceding element is a bridge this means it's called before that + * bridge's @atomic_disable or @disable vfunc. If the preceding element + * is a &drm_encoder it's called right before the + * &drm_encoder_helper_funcs.atomic_disable hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is still running when this callback is called. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from + * &drm_bridge_chain_disable. It would be prudent to also provide an + * implementation of @disable if you are expecting driver calls into + * &drm_bridge_chain_disable. + * + * The @atomic_disable callback is optional. + */ + void (*atomic_disable)(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state); + + /** + * @atomic_post_disable: + * + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @atomic_post_disable or @post_disable function. If the preceding + * element is a &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.atomic_disable hook. + * + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from + * &drm_bridge_chain_post_disable. + * It would be prudent to also provide an implementation of + * @post_disable if you are expecting driver calls into + * &drm_bridge_chain_post_disable. + * + * The @atomic_post_disable callback is optional. + */ + void (*atomic_post_disable)(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current bridge state object (which is guaranteed to be + * non-NULL). + * + * The atomic_duplicate_state hook is mandatory if the bridge + * implements any of the atomic hooks, and should be left unassigned + * otherwise. For bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_duplicate_state() helper function shall be + * used to implement this hook. + * + * RETURNS: + * A valid drm_bridge_state object or NULL if the allocation fails. + */ + struct drm_bridge_state *(*atomic_duplicate_state)(struct drm_bridge *bridge); + + /** + * @atomic_destroy_state: + * + * Destroy a bridge state object previously allocated by + * &drm_bridge_funcs.atomic_duplicate_state(). + * + * The atomic_destroy_state hook is mandatory if the bridge implements + * any of the atomic hooks, and should be left unassigned otherwise. + * For bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_destroy_state() helper function shall be + * used to implement this hook. + */ + void (*atomic_destroy_state)(struct drm_bridge *bridge, + struct drm_bridge_state *state); + + /** + * @atomic_get_output_bus_fmts: + * + * Return the supported bus formats on the output end of a bridge. + * The returned array must be allocated with kmalloc() and will be + * freed by the caller. If the allocation fails, NULL should be + * returned. num_output_fmts must be set to the returned array size. + * Formats listed in the returned array should be listed in decreasing + * preference order (the core will try all formats until it finds one + * that works). 
+ * + * This method is only called on the last element of the bridge chain + * as part of the bus format negotiation process that happens in + * &drm_atomic_bridge_chain_select_bus_fmts(). + * This method is optional. When not implemented, the core will + * fall back to &drm_connector.display_info.bus_formats[0] if + * &drm_connector.display_info.num_bus_formats > 0, + * or to MEDIA_BUS_FMT_FIXED otherwise. + */ + u32 *(*atomic_get_output_bus_fmts)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts); + + /** + * @atomic_get_input_bus_fmts: + * + * Return the supported bus formats on the input end of a bridge for + * a specific output bus format. + * + * The returned array must be allocated with kmalloc() and will be + * freed by the caller. If the allocation fails, NULL should be + * returned. num_input_fmts must be set to the returned array size. + * Formats listed in the returned array should be listed in decreasing + * preference order (the core will try all formats until it finds one + * that works). When the format is not supported NULL should be + * returned and num_input_fmts should be set to 0. + * + * This method is called on all elements of the bridge chain as part of + * the bus format negotiation process that happens in + * drm_atomic_bridge_chain_select_bus_fmts(). + * This method is optional. When not implemented, the core will bypass + * bus format negotiation on this element of the bridge without + * failing, and the previous element in the chain will be passed + * MEDIA_BUS_FMT_FIXED as its output bus format. + * + * Bridge drivers that need to support being linked to bridges that are + * not supporting bus format negotiation should handle the + * output_fmt == MEDIA_BUS_FMT_FIXED case appropriately, by selecting a + * sensible default value or extracting this information from somewhere + * else (FW property, &drm_display_mode, &drm_display_info, ...) + * + * Note: Even if input format selection on the first bridge has no + * impact on the negotiation process (bus format negotiation stops once + * we reach the first element of the chain), drivers are expected to + * return accurate input formats as the input format may be used to + * configure the CRTC output appropriately. + */ + u32 *(*atomic_get_input_bus_fmts)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + + /** + * @atomic_check: + * + * This method is responsible for checking bridge state correctness. + * It can also check the state of the surrounding components in chain + * to make sure the whole pipeline can work properly. + * + * &drm_bridge_funcs.atomic_check() hooks are called in reverse + * order (from the last to the first bridge). + * + * This method is optional. &drm_bridge_funcs.mode_fixup() is not + * called when &drm_bridge_funcs.atomic_check() is implemented, so only + * one of them should be provided. + * + * If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or + * &drm_bridge_state.output_bus_cfg.flags it should happen in + * this function. By default the &drm_bridge_state.output_bus_cfg.flags + * field is set to the next bridge + * &drm_bridge_state.input_bus_cfg.flags value or + * &drm_connector.display_info.bus_flags if the bridge is the last + * element in the chain. 
+ * + * RETURNS: + * zero if the check passed, a negative error code otherwise. + */ + int (*atomic_check)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + + /** + * @atomic_reset: + * + * Reset the bridge to a predefined state (or retrieve its current + * state) and return a &drm_bridge_state object matching this state. + * This function is called at attach time. + * + * The atomic_reset hook is mandatory if the bridge implements any of + * the atomic hooks, and should be left unassigned otherwise. For + * bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_reset() helper function shall be used to + * implement this hook. + * + * Note that the atomic_reset() semantics is not exactly matching the + * reset() semantics found on other components (connector, plane, ...). + * + * 1. The reset operation happens when the bridge is attached, not when + * drm_mode_config_reset() is called + * 2. It's meant to be used exclusively on bridges that have been + * converted to the ATOMIC API + * + * RETURNS: + * A valid drm_bridge_state object in case of success, an ERR_PTR() + * giving the reason of the failure otherwise. + */ + struct drm_bridge_state *(*atomic_reset)(struct drm_bridge *bridge); + + /** + * @detect: + * + * Check if anything is attached to the bridge output. + * + * This callback is optional, if not implemented the bridge will be + * considered as always having a component attached to its output. + * Bridges that implement this callback shall set the + * DRM_BRIDGE_OP_DETECT flag in their &drm_bridge->ops. + * + * RETURNS: + * + * drm_connector_status indicating the bridge output status. + */ + enum drm_connector_status (*detect)(struct drm_bridge *bridge); + + /** + * @get_modes: + * + * Fill all modes currently valid for the sink into the &drm_connector + * with drm_mode_probed_add(). + * + * The @get_modes callback is mostly intended to support non-probeable + * displays such as many fixed panels. Bridges that support reading + * EDID shall leave @get_modes unimplemented and implement the + * &drm_bridge_funcs->get_edid callback instead. + * + * This callback is optional. Bridges that implement it shall set the + * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops. + * + * The connector parameter shall be used for the sole purpose of + * filling modes, and shall not be stored internally by bridge drivers + * for future usage. + * + * RETURNS: + * + * The number of modes added by calling drm_mode_probed_add(). + */ + int (*get_modes)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @get_edid: + * + * Read and parse the EDID data of the connected display. + * + * The @get_edid callback is the preferred way of reporting mode + * information for a display connected to the bridge output. Bridges + * that support reading EDID shall implement this callback and leave + * the @get_modes callback unimplemented. + * + * The caller of this operation shall first verify the output + * connection status and refrain from reading EDID from a disconnected + * output. + * + * This callback is optional. Bridges that implement it shall set the + * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops. + * + * The connector parameter shall be used for the sole purpose of EDID + * retrieval and parsing, and shall not be stored internally by bridge + * drivers for future usage. 
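+ *
+ * One possible implementation simply forwards to the EDID helpers over
+ * the bridge's DDC bus; mybridge_*, bridge_to_mybridge() and the ddc
+ * field are driver-specific placeholders::
+ *
+ *	static struct edid *mybridge_get_edid(struct drm_bridge *bridge,
+ *					      struct drm_connector *connector)
+ *	{
+ *		struct mybridge *ctx = bridge_to_mybridge(bridge);
+ *
+ *		return drm_get_edid(connector, ctx->ddc);
+ *	}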
+ * + * RETURNS: + * + * An edid structure newly allocated with kmalloc() (or similar) on + * success, or NULL otherwise. The caller is responsible for freeing + * the returned edid structure with kfree(). + */ + struct edid *(*get_edid)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @hpd_notify: + * + * Notify the bridge of hot plug detection. + * + * This callback is optional, it may be implemented by bridges that + * need to be notified of display connection or disconnection for + * internal reasons. One use case is to reset the internal state of CEC + * controllers for HDMI bridges. + */ + void (*hpd_notify)(struct drm_bridge *bridge, + enum drm_connector_status status); + + /** + * @hpd_enable: + * + * Enable hot plug detection. From now on the bridge shall call + * drm_bridge_hpd_notify() each time a change is detected in the output + * connection status, until hot plug detection gets disabled with + * @hpd_disable. + * + * This callback is optional and shall only be implemented by bridges + * that support hot-plug notification without polling. Bridges that + * implement it shall also implement the @hpd_disable callback and set + * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. + */ + void (*hpd_enable)(struct drm_bridge *bridge); + + /** + * @hpd_disable: + * + * Disable hot plug detection. Once this function returns the bridge + * shall not call drm_bridge_hpd_notify() when a change in the output + * connection status occurs. + * + * This callback is optional and shall only be implemented by bridges + * that support hot-plug notification without polling. Bridges that + * implement it shall also implement the @hpd_enable callback and set + * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. + */ + void (*hpd_disable)(struct drm_bridge *bridge); +}; + +/** + * struct drm_bridge_timings - timing information for the bridge + */ +struct drm_bridge_timings { + /** + * @input_bus_flags: + * + * Tells what additional settings for the pixel data on the bus + * this bridge requires (like pixel signal polarity). See also + * &drm_display_info->bus_flags. + */ + u32 input_bus_flags; + /** + * @setup_time_ps: + * + * Defines the time in picoseconds the input data lines must be + * stable before the clock edge. + */ + u32 setup_time_ps; + /** + * @hold_time_ps: + * + * Defines the time in picoseconds taken for the bridge to sample the + * input signal after the clock edge. + */ + u32 hold_time_ps; + /** + * @dual_link: + * + * True if the bus operates in dual-link mode. The exact meaning is + * dependent on the bus type. For LVDS buses, this indicates that even- + * and odd-numbered pixels are received on separate links. + */ + bool dual_link; +}; + +/** + * enum drm_bridge_ops - Bitmask of operations supported by the bridge + */ +enum drm_bridge_ops { + /** + * @DRM_BRIDGE_OP_DETECT: The bridge can detect displays connected to + * its output. Bridges that set this flag shall implement the + * &drm_bridge_funcs->detect callback. + */ + DRM_BRIDGE_OP_DETECT = BIT(0), + /** + * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display + * connected to its output. Bridges that set this flag shall implement + * the &drm_bridge_funcs->get_edid callback. + */ + DRM_BRIDGE_OP_EDID = BIT(1), + /** + * @DRM_BRIDGE_OP_HPD: The bridge can detect hot-plug and hot-unplug + * without requiring polling. 
Bridges that set this flag shall + * implement the &drm_bridge_funcs->hpd_enable and + * &drm_bridge_funcs->hpd_disable callbacks if they support enabling + * and disabling hot-plug detection dynamically. + */ + DRM_BRIDGE_OP_HPD = BIT(2), + /** + * @DRM_BRIDGE_OP_MODES: The bridge can retrieve the modes supported + * by the display at its output. This does not include reading EDID + * which is separately covered by @DRM_BRIDGE_OP_EDID. Bridges that set + * this flag shall implement the &drm_bridge_funcs->get_modes callback. + */ + DRM_BRIDGE_OP_MODES = BIT(3), +}; + +/** + * struct drm_bridge - central DRM bridge control structure + */ +struct drm_bridge { + /** @base: inherit from &drm_private_object */ + struct drm_private_obj base; + /** @dev: DRM device this bridge belongs to */ + struct drm_device *dev; + /** @encoder: encoder to which this bridge is connected */ + struct drm_encoder *encoder; + /** @chain_node: used to form a bridge chain */ + struct list_head chain_node; +#ifdef CONFIG_OF + /** @of_node: device node pointer to the bridge */ + struct device_node *of_node; +#endif + /** @list: to keep track of all added bridges */ + struct list_head list; + /** + * @timings: + * + * the timing specification for the bridge, if any (may be NULL) + */ + const struct drm_bridge_timings *timings; + /** @funcs: control functions */ + const struct drm_bridge_funcs *funcs; + /** @driver_private: pointer to the bridge driver's internal context */ + void *driver_private; + /** @ops: bitmask of operations supported by the bridge */ + enum drm_bridge_ops ops; + /** + * @type: Type of the connection at the bridge output + * (DRM_MODE_CONNECTOR_*). For bridges at the end of this chain this + * identifies the type of connected display. + */ + int type; + /** + * @interlace_allowed: Indicate that the bridge can handle interlaced + * modes. + */ + bool interlace_allowed; + /** + * @ddc: Associated I2C adapter for DDC access, if any. + */ + struct i2c_adapter *ddc; + /** private: */ + /** + * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields. + */ + struct mutex hpd_mutex; + /** + * @hpd_cb: Hot plug detection callback, registered with + * drm_bridge_hpd_enable(). + */ + void (*hpd_cb)(void *data, enum drm_connector_status status); + /** + * @hpd_data: Private data passed to the Hot plug detection callback + * @hpd_cb. + */ + void *hpd_data; +}; + +static inline struct drm_bridge * +drm_priv_to_bridge(struct drm_private_obj *priv) +{ + return container_of(priv, struct drm_bridge, base); +} + +void drm_bridge_add(struct drm_bridge *bridge); +void drm_bridge_remove(struct drm_bridge *bridge); +struct drm_bridge *of_drm_find_bridge(struct device_node *np); +int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, + struct drm_bridge *previous, + enum drm_bridge_attach_flags flags); + +/** + * drm_bridge_get_next_bridge() - Get the next bridge in the chain + * @bridge: bridge object + * + * RETURNS: + * the next bridge in the chain after @bridge, or NULL if @bridge is the last. + */ +static inline struct drm_bridge * +drm_bridge_get_next_bridge(struct drm_bridge *bridge) +{ + if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain)) + return NULL; + + return list_next_entry(bridge, chain_node); +} + +/** + * drm_bridge_get_prev_bridge() - Get the previous bridge in the chain + * @bridge: bridge object + * + * RETURNS: + * the previous bridge in the chain, or NULL if @bridge is the first. 
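+ *
+ * For example, walking from @bridge back towards the encoder could be
+ * written as (handle_bridge() is a placeholder)::
+ *
+ *	struct drm_bridge *b = bridge;
+ *
+ *	while ((b = drm_bridge_get_prev_bridge(b)))
+ *		handle_bridge(b);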
+ */ +static inline struct drm_bridge * +drm_bridge_get_prev_bridge(struct drm_bridge *bridge) +{ + if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) + return NULL; + + return list_prev_entry(bridge, chain_node); +} + +/** + * drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain + * @encoder: encoder object + * + * RETURNS: + * the first bridge in the chain, or NULL if @encoder has no bridge attached + * to it. + */ +static inline struct drm_bridge * +drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder) +{ + return list_first_entry_or_null(&encoder->bridge_chain, + struct drm_bridge, chain_node); +} + +/** + * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain + * @encoder: the encoder to iterate bridges on + * @bridge: a bridge pointer updated to point to the current bridge at each + * iteration + * + * Iterate over all bridges present in the bridge chain attached to @encoder. + */ +#define drm_for_each_bridge_in_chain(encoder, bridge) \ + list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node) + +bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +enum drm_mode_status +drm_bridge_chain_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode); +void drm_bridge_chain_disable(struct drm_bridge *bridge); +void drm_bridge_chain_post_disable(struct drm_bridge *bridge); +void drm_bridge_chain_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode); +void drm_bridge_chain_pre_enable(struct drm_bridge *bridge); +void drm_bridge_chain_enable(struct drm_bridge *bridge); + +int drm_atomic_bridge_chain_check(struct drm_bridge *bridge, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); +void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state); + +u32 * +drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + +enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge); +int drm_bridge_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector); +struct edid *drm_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector); +void drm_bridge_hpd_enable(struct drm_bridge *bridge, + void (*cb)(void *data, + enum drm_connector_status status), + void *data); +void drm_bridge_hpd_disable(struct drm_bridge *bridge); +void drm_bridge_hpd_notify(struct drm_bridge *bridge, + enum drm_connector_status status); + +#ifdef CONFIG_DRM_PANEL_BRIDGE +struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel); +struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel, + u32 connector_type); +void drm_panel_bridge_remove(struct drm_bridge *bridge); +struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, + struct drm_panel *panel); +struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev, 
+ struct drm_panel *panel, + u32 connector_type); +struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge); +#endif + +#endif diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h new file mode 100644 index 000000000..33f6c3bbd --- /dev/null +++ b/include/drm/drm_bridge_connector.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#ifndef __DRM_BRIDGE_CONNECTOR_H__ +#define __DRM_BRIDGE_CONNECTOR_H__ + +struct drm_connector; +struct drm_device; +struct drm_encoder; + +void drm_bridge_connector_enable_hpd(struct drm_connector *connector); +void drm_bridge_connector_disable_hpd(struct drm_connector *connector); +struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, + struct drm_encoder *encoder); + +#endif /* __DRM_BRIDGE_CONNECTOR_H__ */ diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h new file mode 100644 index 000000000..e9ad4863d --- /dev/null +++ b/include/drm/drm_cache.h @@ -0,0 +1,73 @@ +/************************************************************************** + * + * Copyright 2009 Red Hat Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Authors: + * Dave Airlie <airlied@redhat.com> + */ + +#ifndef _DRM_CACHE_H_ +#define _DRM_CACHE_H_ + +#include <linux/scatterlist.h> + +void drm_clflush_pages(struct page *pages[], unsigned long num_pages); +void drm_clflush_sg(struct sg_table *st); +void drm_clflush_virt_range(void *addr, unsigned long length); +bool drm_need_swiotlb(int dma_bits); + + +static inline bool drm_arch_can_wc_memory(void) +{ +#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) + return false; +#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON64) + return false; +#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) + /* + * The DRM driver stack is designed to work with cache coherent devices + * only, but permits an optimization to be enabled in some cases, where + * for some buffers, both the CPU and the GPU use uncached mappings, + * removing the need for DMA snooping and allocation in the CPU caches. 
+ * + * The use of uncached GPU mappings relies on the correct implementation + * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU + * will use cached mappings nonetheless. On x86 platforms, this does not + * seem to matter, as uncached CPU mappings will snoop the caches in any + * case. However, on ARM and arm64, enabling this optimization on a + * platform where NoSnoop is ignored results in loss of coherency, which + * breaks correct operation of the device. Since we have no way of + * detecting whether NoSnoop works or not, just disable this + * optimization entirely for ARM and arm64. + */ + return false; +#else + return true; +#endif +} + +#endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h new file mode 100644 index 000000000..7aaea665b --- /dev/null +++ b/include/drm/drm_client.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ + +#ifndef _DRM_CLIENT_H_ +#define _DRM_CLIENT_H_ + +#include <linux/lockdep.h> +#include <linux/mutex.h> +#include <linux/types.h> + +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> + +struct drm_client_dev; +struct drm_device; +struct drm_file; +struct drm_framebuffer; +struct drm_gem_object; +struct drm_minor; +struct module; + +/** + * struct drm_client_funcs - DRM client callbacks + */ +struct drm_client_funcs { + /** + * @owner: The module owner + */ + struct module *owner; + + /** + * @unregister: + * + * Called when &drm_device is unregistered. The client should respond by + * releasing its resources using drm_client_release(). + * + * This callback is optional. + */ + void (*unregister)(struct drm_client_dev *client); + + /** + * @restore: + * + * Called on drm_lastclose(). The first client instance in the list that + * returns zero gets the privilege to restore and no more clients are + * called. This callback is not called after @unregister has been called. + * + * Note that the core does not guarantee exclusion against concurrent + * drm_open(). Clients need to ensure this themselves, for example by + * using drm_master_internal_acquire() and + * drm_master_internal_release(). + * + * This callback is optional. + */ + int (*restore)(struct drm_client_dev *client); + + /** + * @hotplug: + * + * Called on drm_kms_helper_hotplug_event(). + * This callback is not called after @unregister has been called. + * + * This callback is optional. + */ + int (*hotplug)(struct drm_client_dev *client); +}; + +/** + * struct drm_client_dev - DRM client instance + */ +struct drm_client_dev { + /** + * @dev: DRM device + */ + struct drm_device *dev; + + /** + * @name: Name of the client. + */ + const char *name; + + /** + * @list: + * + * List of all clients of a DRM device, linked into + * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex. + */ + struct list_head list; + + /** + * @funcs: DRM client functions (optional) + */ + const struct drm_client_funcs *funcs; + + /** + * @file: DRM file + */ + struct drm_file *file; + + /** + * @modeset_mutex: Protects @modesets. 
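+ *
+ * Clients are expected to take this mutex around any walk of
+ * @modesets, for example (count is a local placeholder)::
+ *
+ *	struct drm_mode_set *modeset;
+ *
+ *	mutex_lock(&client->modeset_mutex);
+ *	drm_client_for_each_modeset(modeset, client)
+ *		if (modeset->mode)
+ *			count++;
+ *	mutex_unlock(&client->modeset_mutex);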
+ */ + struct mutex modeset_mutex; + + /** + * @modesets: CRTC configurations + */ + struct drm_mode_set *modesets; +}; + +int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, + const char *name, const struct drm_client_funcs *funcs); +void drm_client_release(struct drm_client_dev *client); +void drm_client_register(struct drm_client_dev *client); + +void drm_client_dev_unregister(struct drm_device *dev); +void drm_client_dev_hotplug(struct drm_device *dev); +void drm_client_dev_restore(struct drm_device *dev); + +/** + * struct drm_client_buffer - DRM client buffer + */ +struct drm_client_buffer { + /** + * @client: DRM client + */ + struct drm_client_dev *client; + + /** + * @handle: Buffer handle + */ + u32 handle; + + /** + * @pitch: Buffer pitch + */ + u32 pitch; + + /** + * @gem: GEM object backing this buffer + */ + struct drm_gem_object *gem; + + /** + * @vaddr: Virtual address for the buffer + */ + void *vaddr; + + /** + * @fb: DRM framebuffer + */ + struct drm_framebuffer *fb; +}; + +struct drm_client_buffer * +drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); +void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); +int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); +void *drm_client_buffer_vmap(struct drm_client_buffer *buffer); +void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); + +int drm_client_modeset_create(struct drm_client_dev *client); +void drm_client_modeset_free(struct drm_client_dev *client); +int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height); +bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation); +int drm_client_modeset_check(struct drm_client_dev *client); +int drm_client_modeset_commit_locked(struct drm_client_dev *client); +int drm_client_modeset_commit(struct drm_client_dev *client); +int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); + +/** + * drm_client_for_each_modeset() - Iterate over client modesets + * @modeset: &drm_mode_set loop cursor + * @client: DRM client + */ +#define drm_client_for_each_modeset(modeset, client) \ + for (({ lockdep_assert_held(&(client)->modeset_mutex); }), \ + modeset = (client)->modesets; modeset->crtc; modeset++) + +/** + * drm_client_for_each_connector_iter - connector_list iterator macro + * @connector: &struct drm_connector pointer used as cursor + * @iter: &struct drm_connector_list_iter + * + * This iterates the connectors that are useable for internal clients (excludes + * writeback connectors). + * + * For more info see drm_for_each_connector_iter(). 
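+ *
+ * A typical walk, with client pointing to a &struct drm_client_dev,
+ * looks like::
+ *
+ *	struct drm_connector_list_iter iter;
+ *	struct drm_connector *connector;
+ *
+ *	drm_connector_list_iter_begin(client->dev, &iter);
+ *	drm_client_for_each_connector_iter(connector, &iter) {
+ *		/* never reached for writeback connectors */
+ *	}
+ *	drm_connector_list_iter_end(&iter);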
+ */ +#define drm_client_for_each_connector_iter(connector, iter) \ + drm_for_each_connector_iter(connector, iter) \ + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + +void drm_client_debugfs_init(struct drm_minor *minor); + +#endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h new file mode 100644 index 000000000..81c298488 --- /dev/null +++ b/include/drm/drm_color_mgmt.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_COLOR_MGMT_H__ +#define __DRM_COLOR_MGMT_H__ + +#include <linux/ctype.h> +#include <drm/drm_property.h> + +struct drm_crtc; +struct drm_plane; + +/** + * drm_color_lut_extract - clamp and round LUT entries + * @user_input: input value + * @bit_precision: number of bits the hw LUT supports + * + * Extract a degamma/gamma LUT value provided by user (in the form of + * &drm_color_lut entries) and round it to the precision supported by the + * hardware. + */ +static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision) +{ + u32 val = user_input; + u32 max = 0xffff >> (16 - bit_precision); + + /* Round only if we're not using full precision. */ + if (bit_precision < 16) { + val += 1UL << (16 - bit_precision - 1); + val >>= 16 - bit_precision; + } + + return clamp_val(val, 0, max); +} + +u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n); + +void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, + uint degamma_lut_size, + bool has_ctm, + uint gamma_lut_size); + +int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, + int gamma_size); + +/** + * drm_color_lut_size - calculate the number of entries in the LUT + * @blob: blob containing the LUT + * + * Returns: + * The number of entries in the color LUT stored in @blob. 
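+ *
+ * For example, a driver loading a 10 bit hardware gamma LUT from a
+ * blob could iterate it as follows (write_hw_lut_entry() and the bit
+ * depth are placeholders)::
+ *
+ *	const struct drm_color_lut *lut = blob->data;
+ *	int i;
+ *
+ *	for (i = 0; i < drm_color_lut_size(blob); i++)
+ *		write_hw_lut_entry(i,
+ *				   drm_color_lut_extract(lut[i].red, 10),
+ *				   drm_color_lut_extract(lut[i].green, 10),
+ *				   drm_color_lut_extract(lut[i].blue, 10));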
+ */ +static inline int drm_color_lut_size(const struct drm_property_blob *blob) +{ + return blob->length / sizeof(struct drm_color_lut); +} + +enum drm_color_encoding { + DRM_COLOR_YCBCR_BT601, + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_BT2020, + DRM_COLOR_ENCODING_MAX, +}; + +enum drm_color_range { + DRM_COLOR_YCBCR_LIMITED_RANGE, + DRM_COLOR_YCBCR_FULL_RANGE, + DRM_COLOR_RANGE_MAX, +}; + +int drm_plane_create_color_properties(struct drm_plane *plane, + u32 supported_encodings, + u32 supported_ranges, + enum drm_color_encoding default_encoding, + enum drm_color_range default_range); + +/** + * enum drm_color_lut_tests - hw-specific LUT tests to perform + * + * The drm_color_lut_check() function takes a bitmask of the values here to + * determine which tests to apply to a userspace-provided LUT. + */ +enum drm_color_lut_tests { + /** + * @DRM_COLOR_LUT_EQUAL_CHANNELS: + * + * Checks whether the entries of a LUT all have equal values for the + * red, green, and blue channels. Intended for hardware that only + * accepts a single value per LUT entry and assumes that value applies + * to all three color components. + */ + DRM_COLOR_LUT_EQUAL_CHANNELS = BIT(0), + + /** + * @DRM_COLOR_LUT_NON_DECREASING: + * + * Checks whether the entries of a LUT are always flat or increasing + * (never decreasing). + */ + DRM_COLOR_LUT_NON_DECREASING = BIT(1), +}; + +int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests); +#endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h new file mode 100644 index 000000000..03d39532c --- /dev/null +++ b/include/drm/drm_connector.h @@ -0,0 +1,1747 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef __DRM_CONNECTOR_H__ +#define __DRM_CONNECTOR_H__ + +#include <linux/list.h> +#include <linux/llist.h> +#include <linux/ctype.h> +#include <linux/hdmi.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_util.h> + +#include <uapi/drm/drm_mode.h> + +struct drm_connector_helper_funcs; +struct drm_modeset_acquire_ctx; +struct drm_device; +struct drm_crtc; +struct drm_encoder; +struct drm_property; +struct drm_property_blob; +struct drm_printer; +struct edid; +struct i2c_adapter; + +enum drm_connector_force { + DRM_FORCE_UNSPECIFIED, + DRM_FORCE_OFF, + DRM_FORCE_ON, /* force on analog part normally */ + DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ +}; + +/** + * enum drm_connector_status - status for a &drm_connector + * + * This enum is used to track the connector status. There are no separate + * #defines for the uapi! + */ +enum drm_connector_status { + /** + * @connector_status_connected: The connector is definitely connected to + * a sink device, and can be enabled. + */ + connector_status_connected = 1, + /** + * @connector_status_disconnected: The connector isn't connected to a + * sink device which can be autodetect. For digital outputs like DP or + * HDMI (which can be realiable probed) this means there's really + * nothing there. It is driver-dependent whether a connector with this + * status can be lit up or not. + */ + connector_status_disconnected = 2, + /** + * @connector_status_unknown: The connector's status could not be + * reliably detected. This happens when probing would either cause + * flicker (like load-detection when the connector is in use), or when a + * hardware resource isn't available (like when load-detection needs a + * free CRTC). It should be possible to light up the connector with one + * of the listed fallback modes. For default configuration userspace + * should only try to light up connectors with unknown status when + * there's not connector with @connector_status_connected. + */ + connector_status_unknown = 3, +}; + +/** + * enum drm_connector_registration_status - userspace registration status for + * a &drm_connector + * + * This enum is used to track the status of initializing a connector and + * registering it with userspace, so that DRM can prevent bogus modesets on + * connectors that no longer exist. + */ +enum drm_connector_registration_state { + /** + * @DRM_CONNECTOR_INITIALIZING: The connector has just been created, + * but has yet to be exposed to userspace. There should be no + * additional restrictions to how the state of this connector may be + * modified. + */ + DRM_CONNECTOR_INITIALIZING = 0, + + /** + * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized + * and registered with sysfs, as such it has been exposed to + * userspace. There should be no additional restrictions to how the + * state of this connector may be modified. + */ + DRM_CONNECTOR_REGISTERED = 1, + + /** + * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed + * to userspace and has since been unregistered and removed from + * userspace, or the connector was unregistered before it had a chance + * to be exposed to userspace (e.g. still in the + * @DRM_CONNECTOR_INITIALIZING state). When a connector is + * unregistered, there are additional restrictions to how its state + * may be modified: + * + * - An unregistered connector may only have its DPMS changed from + * On->Off. Once DPMS is changed to Off, it may not be switched back + * to On. 
+ * - Modesets are not allowed on unregistered connectors, unless they + * would result in disabling its assigned CRTCs. This means + * disabling a CRTC on an unregistered connector is OK, but enabling + * one is not. + * - Removing a CRTC from an unregistered connector is OK, but new + * CRTCs may never be assigned to an unregistered connector. + */ + DRM_CONNECTOR_UNREGISTERED = 2, +}; + +enum subpixel_order { + SubPixelUnknown = 0, + SubPixelHorizontalRGB, + SubPixelHorizontalBGR, + SubPixelVerticalRGB, + SubPixelVerticalBGR, + SubPixelNone, + +}; + +/** + * struct drm_scrambling: sink's scrambling support. + */ +struct drm_scrambling { + /** + * @supported: scrambling supported for rates > 340 Mhz. + */ + bool supported; + /** + * @low_rates: scrambling supported for rates <= 340 Mhz. + */ + bool low_rates; +}; + +/* + * struct drm_scdc - Information about scdc capabilities of a HDMI 2.0 sink + * + * Provides SCDC register support and capabilities related information on a + * HDMI 2.0 sink. In case of a HDMI 1.4 sink, all parameter must be 0. + */ +struct drm_scdc { + /** + * @supported: status control & data channel present. + */ + bool supported; + /** + * @read_request: sink is capable of generating scdc read request. + */ + bool read_request; + /** + * @scrambling: sink's scrambling capabilities + */ + struct drm_scrambling scrambling; +}; + + +/** + * struct drm_hdmi_info - runtime information about the connected HDMI sink + * + * Describes if a given display supports advanced HDMI 2.0 features. + * This information is available in CEA-861-F extension blocks (like HF-VSDB). + */ +struct drm_hdmi_info { + /** @scdc: sink's scdc support and capabilities */ + struct drm_scdc scdc; + + /** + * @y420_vdb_modes: bitmap of modes which can support ycbcr420 + * output only (not normal RGB/YCBCR444/422 outputs). The max VIC + * defined by the CEA-861-G spec is 219, so the size is 256 bits to map + * up to 256 VICs. + */ + unsigned long y420_vdb_modes[BITS_TO_LONGS(256)]; + + /** + * @y420_cmdb_modes: bitmap of modes which can support ycbcr420 + * output also, along with normal HDMI outputs. The max VIC defined by + * the CEA-861-G spec is 219, so the size is 256 bits to map up to 256 + * VICs. + */ + unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)]; + + /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */ + u64 y420_cmdb_map; + + /** @y420_dc_modes: bitmap of deep color support index */ + u8 y420_dc_modes; +}; + +/** + * enum drm_link_status - connector's link_status property value + * + * This enum is used as the connector's link status property value. + * It is set to the values defined in uapi. + * + * @DRM_LINK_STATUS_GOOD: DP Link is Good as a result of successful + * link training + * @DRM_LINK_STATUS_BAD: DP Link is BAD as a result of link training + * failure + */ +enum drm_link_status { + DRM_LINK_STATUS_GOOD = DRM_MODE_LINK_STATUS_GOOD, + DRM_LINK_STATUS_BAD = DRM_MODE_LINK_STATUS_BAD, +}; + +/** + * enum drm_panel_orientation - panel_orientation info for &drm_display_info + * + * This enum is used to track the (LCD) panel orientation. There are no + * separate #defines for the uapi! + * + * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any + * panel orientation information (normal + * for non panels) in this case the "panel + * orientation" connector prop will not be + * attached. + * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the + * top side of the device's casing. 
+ * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the + * bottom side of the device's casing, iow + * the panel is mounted upside-down. + * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the + * top side of the device's casing. + */ +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + +/** + * struct drm_monitor_range_info - Panel's Monitor range in EDID for + * &drm_display_info + * + * This struct is used to store a frequency range supported by panel + * as parsed from EDID's detailed monitor range descriptor block. + * + * @min_vfreq: This is the min supported refresh rate in Hz from + * EDID's detailed monitor range. + * @max_vfreq: This is the max supported refresh rate in Hz from + * EDID's detailed monitor range + */ +struct drm_monitor_range_info { + u8 min_vfreq; + u8 max_vfreq; +}; + +/* + * This is a consolidated colorimetry list supported by HDMI and + * DP protocol standard. The respective connectors will register + * a property with the subset of this list (supported by that + * respective protocol). Userspace will set the colorspace through + * a colorspace property which will be created and exposed to + * userspace. + */ + +/* For Default case, driver will set the colorspace */ +#define DRM_MODE_COLORIMETRY_DEFAULT 0 +/* CEA 861 Normal Colorimetry options */ +#define DRM_MODE_COLORIMETRY_NO_DATA 0 +#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1 +#define DRM_MODE_COLORIMETRY_BT709_YCC 2 +/* CEA 861 Extended Colorimetry Options */ +#define DRM_MODE_COLORIMETRY_XVYCC_601 3 +#define DRM_MODE_COLORIMETRY_XVYCC_709 4 +#define DRM_MODE_COLORIMETRY_SYCC_601 5 +#define DRM_MODE_COLORIMETRY_OPYCC_601 6 +#define DRM_MODE_COLORIMETRY_OPRGB 7 +#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8 +#define DRM_MODE_COLORIMETRY_BT2020_RGB 9 +#define DRM_MODE_COLORIMETRY_BT2020_YCC 10 +/* Additional Colorimetry extension added as part of CTA 861.G */ +#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11 +#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12 +/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */ +#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13 +#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14 +#define DRM_MODE_COLORIMETRY_BT601_YCC 15 + +/** + * enum drm_bus_flags - bus_flags info for &drm_display_info + * + * This enum defines signal polarities and clock edge information for signals on + * a bus as bitmask flags. + * + * The clock edge information is conveyed by two sets of symbols, + * DRM_BUS_FLAGS_*_DRIVE_\* and DRM_BUS_FLAGS_*_SAMPLE_\*. When this enum is + * used to describe a bus from the point of view of the transmitter, the + * \*_DRIVE_\* flags should be used. When used from the point of view of the + * receiver, the \*_SAMPLE_\* flags should be used. The \*_DRIVE_\* and + * \*_SAMPLE_\* flags alias each other, with the \*_SAMPLE_POSEDGE and + * \*_SAMPLE_NEGEDGE flags being equal to \*_DRIVE_NEGEDGE and \*_DRIVE_POSEDGE + * respectively. This simplifies code as signals are usually sampled on the + * opposite edge of the driving edge. Transmitters and receivers may however + * need to take other signal timings into account to convert between driving + * and sample edges. 
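+ *
+ * For example, a hypothetical bridge that samples pixel data on the
+ * falling clock edge and expects an active-high data enable could
+ * describe its input bus as (mybridge_timings and the timing values
+ * are placeholders)::
+ *
+ *	static const struct drm_bridge_timings mybridge_timings = {
+ *		.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ *				   DRM_BUS_FLAG_DE_HIGH,
+ *		.setup_time_ps = 500,
+ *		.hold_time_ps = 1500,
+ *	};
+ *
+ * Because of the aliasing described above, the same flag also reads as
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE from the transmitter's side.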
+ */ +enum drm_bus_flags { + /** + * @DRM_BUS_FLAG_DE_LOW: + * + * The Data Enable signal is active low + */ + DRM_BUS_FLAG_DE_LOW = BIT(0), + + /** + * @DRM_BUS_FLAG_DE_HIGH: + * + * The Data Enable signal is active high + */ + DRM_BUS_FLAG_DE_HIGH = BIT(1), + + /** + * @DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE: + * + * Data is driven on the rising edge of the pixel clock + */ + DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = BIT(2), + + /** + * @DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE: + * + * Data is driven on the falling edge of the pixel clock + */ + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = BIT(3), + + /** + * @DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE: + * + * Data is sampled on the rising edge of the pixel clock + */ + DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, + + /** + * @DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE: + * + * Data is sampled on the falling edge of the pixel clock + */ + DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, + + /** + * @DRM_BUS_FLAG_DATA_MSB_TO_LSB: + * + * Data is transmitted MSB to LSB on the bus + */ + DRM_BUS_FLAG_DATA_MSB_TO_LSB = BIT(4), + + /** + * @DRM_BUS_FLAG_DATA_LSB_TO_MSB: + * + * Data is transmitted LSB to MSB on the bus + */ + DRM_BUS_FLAG_DATA_LSB_TO_MSB = BIT(5), + + /** + * @DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE: + * + * Sync signals are driven on the rising edge of the pixel clock + */ + DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = BIT(6), + + /** + * @DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE: + * + * Sync signals are driven on the falling edge of the pixel clock + */ + DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = BIT(7), + + /** + * @DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE: + * + * Sync signals are sampled on the rising edge of the pixel clock + */ + DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE, + + /** + * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: + * + * Sync signals are sampled on the falling edge of the pixel clock + */ + DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE, + + /** + * @DRM_BUS_FLAG_SHARP_SIGNALS: + * + * Set if the Sharp-specific signals (SPL, CLS, PS, REV) must be used + */ + DRM_BUS_FLAG_SHARP_SIGNALS = BIT(8), +}; + +/** + * struct drm_display_info - runtime data about the connected sink + * + * Describes a given display (e.g. CRT or flat panel) and its limitations. For + * fixed display sinks like built-in panels there's not much difference between + * this and &struct drm_connector. But for sinks with a real cable this + * structure is meant to describe all the things at the other end of the cable. + * + * For sinks which provide an EDID this can be filled out by calling + * drm_add_edid_modes(). + */ +struct drm_display_info { + /** + * @width_mm: Physical width in mm. + */ + unsigned int width_mm; + + /** + * @height_mm: Physical height in mm. + */ + unsigned int height_mm; + + /** + * @bpc: Maximum bits per color channel. Used by HDMI and DP outputs. + */ + unsigned int bpc; + + /** + * @subpixel_order: Subpixel order of LCD panels. + */ + enum subpixel_order subpixel_order; + +#define DRM_COLOR_FORMAT_RGB444 (1<<0) +#define DRM_COLOR_FORMAT_YCRCB444 (1<<1) +#define DRM_COLOR_FORMAT_YCRCB422 (1<<2) +#define DRM_COLOR_FORMAT_YCRCB420 (1<<3) + + /** + * @panel_orientation: Read only connector property for built-in panels, + * indicating the orientation of the panel vs the device's casing. + * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN. + * When not UNKNOWN this gets used by the drm_fb_helpers to rotate the + * fb to compensate and gets exported as prop to userspace. 
+ */ + int panel_orientation; + + /** + * @color_formats: HDMI Color formats, selects between RGB and YCrCb + * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones + * as used to describe the pixel format in framebuffers, and also don't + * match the formats in @bus_formats which are shared with v4l. + */ + u32 color_formats; + + /** + * @bus_formats: Pixel data format on the wire, somewhat redundant with + * @color_formats. Array of size @num_bus_formats encoded using + * MEDIA_BUS_FMT\_ defines shared with v4l and media drivers. + */ + const u32 *bus_formats; + /** + * @num_bus_formats: Size of @bus_formats array. + */ + unsigned int num_bus_formats; + + /** + * @bus_flags: Additional information (like pixel signal polarity) for + * the pixel data on the bus, using &enum drm_bus_flags values + * DRM_BUS_FLAGS\_. + */ + u32 bus_flags; + + /** + * @max_tmds_clock: Maximum TMDS clock rate supported by the + * sink in kHz. 0 means undefined. + */ + int max_tmds_clock; + + /** + * @dvi_dual: Dual-link DVI sink? + */ + bool dvi_dual; + + /** + * @is_hdmi: True if the sink is an HDMI device. + * + * This field shall be used instead of calling + * drm_detect_hdmi_monitor() when possible. + */ + bool is_hdmi; + + /** + * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? + */ + bool has_hdmi_infoframe; + + /** + * @rgb_quant_range_selectable: Does the sink support selecting + * the RGB quantization range? + */ + bool rgb_quant_range_selectable; + + /** + * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even + * more stuff redundant with @bus_formats. + */ + u8 edid_hdmi_dc_modes; + + /** + * @cea_rev: CEA revision of the HDMI sink. + */ + u8 cea_rev; + + /** + * @hdmi: advance features of a HDMI sink. + */ + struct drm_hdmi_info hdmi; + + /** + * @non_desktop: Non desktop display (HMD). + */ + bool non_desktop; + + /** + * @monitor_range: Frequency range supported by monitor range descriptor + */ + struct drm_monitor_range_info monitor_range; +}; + +int drm_display_info_set_bus_formats(struct drm_display_info *info, + const u32 *formats, + unsigned int num_formats); + +/** + * struct drm_connector_tv_margins - TV connector related margins + * + * Describes the margins in pixels to put around the image on TV + * connectors to deal with overscan. + */ +struct drm_connector_tv_margins { + /** + * @bottom: Bottom margin in pixels. + */ + unsigned int bottom; + + /** + * @left: Left margin in pixels. + */ + unsigned int left; + + /** + * @right: Right margin in pixels. + */ + unsigned int right; + + /** + * @top: Top margin in pixels. 
+ */ + unsigned int top; +}; + +/** + * struct drm_tv_connector_state - TV connector related states + * @subconnector: selected subconnector + * @margins: TV margins + * @mode: TV mode + * @brightness: brightness in percent + * @contrast: contrast in percent + * @flicker_reduction: flicker reduction in percent + * @overscan: overscan in percent + * @saturation: saturation in percent + * @hue: hue in percent + */ +struct drm_tv_connector_state { + enum drm_mode_subconnector subconnector; + struct drm_connector_tv_margins margins; + unsigned int mode; + unsigned int brightness; + unsigned int contrast; + unsigned int flicker_reduction; + unsigned int overscan; + unsigned int saturation; + unsigned int hue; +}; + +/** + * struct drm_connector_state - mutable connector state + */ +struct drm_connector_state { + /** @connector: backpointer to the connector */ + struct drm_connector *connector; + + /** + * @crtc: CRTC to connect connector to, NULL if disabled. + * + * Do not change this directly, use drm_atomic_set_crtc_for_connector() + * instead. + */ + struct drm_crtc *crtc; + + /** + * @best_encoder: + * + * Used by the atomic helpers to select the encoder, through the + * &drm_connector_helper_funcs.atomic_best_encoder or + * &drm_connector_helper_funcs.best_encoder callbacks. + * + * This is also used in the atomic helpers to map encoders to their + * current and previous connectors, see + * drm_atomic_get_old_connector_for_encoder() and + * drm_atomic_get_new_connector_for_encoder(). + * + * NOTE: Atomic drivers must fill this out (either themselves or through + * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will + * not return correct data to userspace. + */ + struct drm_encoder *best_encoder; + + /** + * @link_status: Connector link_status to keep track of whether link is + * GOOD or BAD to notify userspace if retraining is necessary. + */ + enum drm_link_status link_status; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; + + /** + * @commit: Tracks the pending commit to prevent use-after-free conditions. + * + * Is only set when @crtc is NULL. + */ + struct drm_crtc_commit *commit; + + /** @tv: TV connector state */ + struct drm_tv_connector_state tv; + + /** + * @self_refresh_aware: + * + * This tracks whether a connector is aware of the self refresh state. + * It should be set to true for those connector implementations which + * understand the self refresh state. This is needed since the crtc + * registers the self refresh helpers and it doesn't know if the + * connectors downstream have implemented self refresh entry/exit. + * + * Drivers should set this to true in atomic_check if they know how to + * handle self_refresh requests. + */ + bool self_refresh_aware; + + /** + * @picture_aspect_ratio: Connector property to control the + * HDMI infoframe aspect ratio setting. + * + * The %DRM_MODE_PICTURE_ASPECT_\* values much match the + * values for &enum hdmi_picture_aspect + */ + enum hdmi_picture_aspect picture_aspect_ratio; + + /** + * @content_type: Connector property to control the + * HDMI infoframe content type setting. + * The %DRM_MODE_CONTENT_TYPE_\* values much + * match the values. + */ + unsigned int content_type; + + /** + * @hdcp_content_type: Connector property to pass the type of + * protected content. This is most commonly used for HDCP. + */ + unsigned int hdcp_content_type; + + /** + * @scaling_mode: Connector property to control the + * upscaling, mostly used for built-in panels. 
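+ *
+ * Drivers that support scaling typically attach the property with the
+ * subset of modes they can handle, for instance::
+ *
+ *	drm_connector_attach_scaling_mode_property(connector,
+ *			BIT(DRM_MODE_SCALE_FULLSCREEN) |
+ *			BIT(DRM_MODE_SCALE_CENTER));
+ *
+ * and then honour the value found here when programming their scaler.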
+ */ + unsigned int scaling_mode; + + /** + * @content_protection: Connector property to request content + * protection. This is most commonly used for HDCP. + */ + unsigned int content_protection; + + /** + * @colorspace: State variable for Connector property to request + * colorspace change on Sink. This is most commonly used to switch + * to wider color gamuts like BT2020. + */ + u32 colorspace; + + /** + * @writeback_job: Writeback job for writeback connectors + * + * Holds the framebuffer and out-fence for a writeback connector. As + * the writeback completion may be asynchronous to the normal commit + * cycle, the writeback job lifetime is managed separately from the + * normal atomic state by this object. + * + * See also: drm_writeback_queue_job() and + * drm_writeback_signal_completion() + */ + struct drm_writeback_job *writeback_job; + + /** + * @max_requested_bpc: Connector property to limit the maximum bit + * depth of the pixels. + */ + u8 max_requested_bpc; + + /** + * @max_bpc: Connector max_bpc based on the requested max_bpc property + * and the connector bpc limitations obtained from edid. + */ + u8 max_bpc; + + /** + * @hdr_output_metadata: + * DRM blob property for HDR output metadata + */ + struct drm_property_blob *hdr_output_metadata; +}; + +/** + * struct drm_connector_funcs - control connectors on a given device + * + * Each CRTC may have one or more connectors attached to it. The functions + * below allow the core DRM code to control connectors, enumerate available modes, + * etc. + */ +struct drm_connector_funcs { + /** + * @dpms: + * + * Legacy entry point to set the per-connector DPMS state. Legacy DPMS + * is exposed as a standard property on the connector, but diverted to + * this callback in the drm core. Note that atomic drivers don't + * implement the 4 level DPMS support on the connector any more, but + * instead only have an on/off "ACTIVE" property on the CRTC object. + * + * This hook is not used by atomic drivers, remapping of the legacy DPMS + * property is entirely handled in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*dpms)(struct drm_connector *connector, int mode); + + /** + * @reset: + * + * Reset connector hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_connector_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_connector *connector); + + /** + * @detect: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. + * + * This callback is optional, if not implemented the connector will be + * considered as always being attached. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + * + * Note that the helper library will already hold + * &drm_mode_config.connection_mutex. Drivers which need to grab additional + * locks to avoid races with concurrent modeset changes need to use + * &drm_connector_helper_funcs.detect_ctx instead. 
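+ *
+ * A minimal GPIO-based sketch; mydev_*, connector_to_mydev() and
+ * hpd_gpio are driver-specific placeholders::
+ *
+ *	static enum drm_connector_status
+ *	mydev_detect(struct drm_connector *connector, bool force)
+ *	{
+ *		struct mydev *priv = connector_to_mydev(connector);
+ *
+ *		return gpiod_get_value_cansleep(priv->hpd_gpio) ?
+ *			connector_status_connected :
+ *			connector_status_disconnected;
+ *	}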
+ * + * RETURNS: + * + * drm_connector_status indicating the connector's status. + */ + enum drm_connector_status (*detect)(struct drm_connector *connector, + bool force); + + /** + * @force: + * + * This function is called to update internal encoder state when the + * connector is forced to a certain state by userspace, either through + * the sysfs interfaces or on the kernel cmdline. In that case the + * @detect callback isn't called. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + */ + void (*force)(struct drm_connector *connector); + + /** + * @fill_modes: + * + * Entry point for output detection and basic mode validation. The + * driver should reprobe the output if needed (e.g. when hotplug + * handling is unreliable), add all detected modes to &drm_connector.modes + * and filter out any the device can't support in any configuration. It + * also needs to filter out any modes wider or higher than the + * parameters max_width and max_height indicate. + * + * The drivers must also prune any modes no longer valid from + * &drm_connector.modes. Furthermore it must update + * &drm_connector.status and &drm_connector.edid. If no EDID has been + * received for this output connector->edid must be NULL. + * + * Drivers using the probe helpers should use + * drm_helper_probe_single_connector_modes() to implement this + * function. + * + * RETURNS: + * + * The number of modes detected and filled into &drm_connector.modes. + */ + int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * connector. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_connector *connector, struct drm_property *property, + uint64_t val); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the connector, light backlight control, i2c, + * DP aux or similar interfaces. It is called late in the driver load + * sequence from drm_connector_register() when registering all the + * core drm connector interfaces. Everything added from this callback + * should be unregistered in the early_unregister callback. + * + * This is called while holding &drm_connector.mutex. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_connector *connector); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the connector from + * late_register(). It is called from drm_connector_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + * + * This is called while holding &drm_connector.mutex. + */ + void (*early_unregister)(struct drm_connector *connector); + + /** + * @destroy: + * + * Clean up connector resources. This is called at driver unload time + * through drm_mode_config_cleanup(). 
It can also be called at runtime + * when a connector is being hot-unplugged for drivers that support + * connector hotplugging (e.g. DisplayPort MST). + */ + void (*destroy)(struct drm_connector *connector); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this connector and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_connector_state should use + * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before &drm_connector.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. + */ + void (*atomic_destroy_state)(struct drm_connector *connector, + struct drm_connector_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_connector_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this connector). 
No other validation + * is allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ + int (*atomic_set_property)(struct drm_connector *connector, + struct drm_connector_state *state, + struct drm_property *property, + uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCONNECTOR IOCTL. + * + * Do not call this function directly, use + * drm_atomic_connector_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which shouldn't ever happen, the core only asks for + * properties attached to this connector). + */ + int (*atomic_get_property)(struct drm_connector *connector, + const struct drm_connector_state *state, + struct drm_property *property, + uint64_t *val); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_connector_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_connector_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_connector_state *state); + + /** + * @oob_hotplug_event: + * + * This will get called when a hotplug-event for a drm-connector + * has been received from a source outside the display driver / device. + */ + void (*oob_hotplug_event)(struct drm_connector *connector); +}; + +/** + * struct drm_cmdline_mode - DRM Mode passed through the kernel command-line + * + * Each connector can have an initial mode with additional options + * passed through the kernel command line. This structure allows to + * express those parameters and will be filled by the command-line + * parser. + */ +struct drm_cmdline_mode { + /** + * @name: + * + * Name of the mode. + */ + char name[DRM_DISPLAY_MODE_LEN]; + + /** + * @specified: + * + * Has a mode been read from the command-line? + */ + bool specified; + + /** + * @refresh_specified: + * + * Did the mode have a preferred refresh rate? + */ + bool refresh_specified; + + /** + * @bpp_specified: + * + * Did the mode have a preferred BPP? + */ + bool bpp_specified; + + /** + * @xres: + * + * Active resolution on the X axis, in pixels. + */ + int xres; + + /** + * @yres: + * + * Active resolution on the Y axis, in pixels. + */ + int yres; + + /** + * @bpp: + * + * Bits per pixels for the mode. + */ + int bpp; + + /** + * @refresh: + * + * Refresh rate, in Hertz. + */ + int refresh; + + /** + * @rb: + * + * Do we need to use reduced blanking? + */ + bool rb; + + /** + * @interlace: + * + * The mode is interlaced. + */ + bool interlace; + + /** + * @cvt: + * + * The timings will be calculated using the VESA Coordinated + * Video Timings instead of looking up the mode from a table. + */ + bool cvt; + + /** + * @margins: + * + * Add margins to the mode calculation (1.8% of xres rounded + * down to 8 pixels and 1.8% of yres). + */ + bool margins; + + /** + * @force: + * + * Ignore the hotplug state of the connector, and force its + * state to one of the DRM_FORCE_* values. + */ + enum drm_connector_force force; + + /** + * @rotation_reflection: + * + * Initial rotation and reflection of the mode setup from the + * command line. See DRM_MODE_ROTATE_* and + * DRM_MODE_REFLECT_*. 
The only rotations supported are + * DRM_MODE_ROTATE_0 and DRM_MODE_ROTATE_180. + */ + unsigned int rotation_reflection; + + /** + * @panel_orientation: + * + * drm-connector "panel orientation" property override value, + * DRM_MODE_PANEL_ORIENTATION_UNKNOWN if not set. + */ + enum drm_panel_orientation panel_orientation; + + /** + * @tv_margins: TV margins to apply to the mode. + */ + struct drm_connector_tv_margins tv_margins; +}; + +/** + * struct drm_connector - central DRM connector control structure + * + * Each connector may be connected to one or more CRTCs, or may be clonable by + * another connector if they can share a CRTC. Each connector also has a specific + * position in the broader display (referred to as a 'screen' though it could + * span multiple monitors). + */ +struct drm_connector { + /** @dev: parent DRM device */ + struct drm_device *dev; + /** @kdev: kernel device for sysfs attributes */ + struct device *kdev; + /** @attr: sysfs attributes */ + struct device_attribute *attr; + /** + * @fwnode: associated fwnode supplied by platform firmware + * + * Drivers can set this to associate a fwnode with a connector, drivers + * are expected to get a reference on the fwnode when setting this. + * drm_connector_cleanup() will call fwnode_handle_put() on this. + */ + struct fwnode_handle *fwnode; + + /** + * @head: + * + * List of all connectors on a @dev, linked from + * &drm_mode_config.connector_list. Protected by + * &drm_mode_config.connector_list_lock, but please only use + * &drm_connector_list_iter to walk this list. + */ + struct list_head head; + + /** + * @global_connector_list_entry: + * + * Connector entry in the global connector-list, used by + * drm_connector_find_by_fwnode(). + */ + struct list_head global_connector_list_entry; + + /** @base: base KMS object */ + struct drm_mode_object base; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: Lock for general connector state, but currently only protects + * @registered. Most of the connector state is still protected by + * &drm_mode_config.mutex. + */ + struct mutex mutex; + + /** + * @index: Compacted connector index, which matches the position inside + * the mode_config.list for drivers not supporting hot-add/removing. Can + * be used as an array index. It is invariant over the lifetime of the + * connector. + */ + unsigned index; + + /** + * @connector_type: + * one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h + */ + int connector_type; + /** @connector_type_id: index into connector type enum */ + int connector_type_id; + /** + * @interlace_allowed: + * Can this connector handle interlaced modes? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ + bool interlace_allowed; + /** + * @doublescan_allowed: + * Can this connector handle doublescan? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ + bool doublescan_allowed; + /** + * @stereo_allowed: + * Can this connector handle stereo modes? Only used by + * drm_helper_probe_single_connector_modes() for mode filtering. + */ + bool stereo_allowed; + + /** + * @ycbcr_420_allowed : This bool indicates if this connector is + * capable of handling YCBCR 420 output. While parsing the EDID + * blocks it's very helpful to know if the source is capable of + * handling YCBCR 420 outputs. + */ + bool ycbcr_420_allowed; + + /** + * @registration_state: Is this connector initializing, exposed + * (registered) with userspace, or unregistered? 
+ *
+ * Protected by @mutex.
+ */
+ enum drm_connector_registration_state registration_state;
+
+ /**
+ * @modes:
+ * Modes available on this connector (from fill_modes() + user).
+ * Protected by &drm_mode_config.mutex.
+ */
+ struct list_head modes;
+
+ /**
+ * @status:
+ * One of the drm_connector_status enums (connected, not, or unknown).
+ * Protected by &drm_mode_config.mutex.
+ */
+ enum drm_connector_status status;
+
+ /**
+ * @probed_modes:
+ * These are modes added by probing with DDC or the BIOS, before
+ * filtering is applied. Used by the probe helpers. Protected by
+ * &drm_mode_config.mutex.
+ */
+ struct list_head probed_modes;
+
+ /**
+ * @display_info: Display information is filled from EDID information
+ * when a display is detected. For non hot-pluggable displays such as
+ * flat panels in embedded systems, the driver should initialize the
+ * &drm_display_info.width_mm and &drm_display_info.height_mm fields
+ * with the physical size of the display.
+ *
+ * Protected by &drm_mode_config.mutex.
+ */
+ struct drm_display_info display_info;
+
+ /** @funcs: connector control functions */
+ const struct drm_connector_funcs *funcs;
+
+ /**
+ * @edid_blob_ptr: DRM property containing EDID if present. Protected by
+ * &drm_mode_config.mutex. This should be updated only by calling
+ * drm_connector_update_edid_property().
+ */
+ struct drm_property_blob *edid_blob_ptr;
+
+ /** @properties: property tracking for this connector */
+ struct drm_object_properties properties;
+
+ /**
+ * @scaling_mode_property: Optional atomic property to control the
+ * upscaling. See drm_connector_attach_scaling_mode_property().
+ */
+ struct drm_property *scaling_mode_property;
+
+ /**
+ * @vrr_capable_property: Optional property to help userspace
+ * query hardware support for variable refresh rate on a
+ * connector. Drivers can add the property to a connector by
+ * calling drm_connector_attach_vrr_capable_property().
+ *
+ * This should be updated only by calling
+ * drm_connector_set_vrr_capable_property().
+ */
+ struct drm_property *vrr_capable_property;
+
+ /**
+ * @colorspace_property: Connector property to set the suitable
+ * colorspace supported by the sink.
+ */
+ struct drm_property *colorspace_property;
+
+ /**
+ * @path_blob_ptr:
+ *
+ * DRM blob property data for the DP MST path property. This should only
+ * be updated by calling drm_connector_set_path_property().
+ */
+ struct drm_property_blob *path_blob_ptr;
+
+ /**
+ * @max_bpc_property: Default connector property for the max bpc to be
+ * driven out of the connector.
+ */
+ struct drm_property *max_bpc_property;
+
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+ /**
+ * @polled:
+ *
+ * Connector polling mode, a combination of
+ *
+ * DRM_CONNECTOR_POLL_HPD
+ * The connector generates hotplug events and doesn't need to be
+ * periodically polled. The CONNECT and DISCONNECT flags must not
+ * be set together with the HPD flag.
+ *
+ * DRM_CONNECTOR_POLL_CONNECT
+ * Periodically poll the connector for connection.
+ *
+ * DRM_CONNECTOR_POLL_DISCONNECT
+ * Periodically poll the connector for disconnection, without
+ * causing flickering even when the connector is in use. DACs should
+ * rarely do this without a lot of testing.
+ *
+ * Set to 0 for connectors that don't support connection status
+ * discovery.
+ */
+ uint8_t polled;
+
+ /**
+ * @dpms: Current dpms state.
For legacy drivers the + * &drm_connector_funcs.dpms callback must update this. For atomic + * drivers, this is handled by the core atomic code, and drivers must + * only take &drm_crtc_state.active into account. + */ + int dpms; + + /** @helper_private: mid-layer private data */ + const struct drm_connector_helper_funcs *helper_private; + + /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */ + struct drm_cmdline_mode cmdline_mode; + /** @force: a DRM_FORCE_<foo> state for forced mode sets */ + enum drm_connector_force force; + /** @override_edid: has the EDID been overwritten through debugfs for testing? */ + bool override_edid; + /** @epoch_counter: used to detect any other changes in connector, besides status */ + u64 epoch_counter; + + /** + * @possible_encoders: Bit mask of encoders that can drive this + * connector, drm_encoder_index() determines the index into the bitfield + * and the bits are set with drm_connector_attach_encoder(). + */ + u32 possible_encoders; + + /** + * @encoder: Currently bound encoder driving this connector, if any. + * Only really meaningful for non-atomic drivers. Atomic drivers should + * instead look at &drm_connector_state.best_encoder, and in case they + * need the CRTC driving this output, &drm_connector_state.crtc. + */ + struct drm_encoder *encoder; + +#define MAX_ELD_BYTES 128 + /** @eld: EDID-like data, if present */ + uint8_t eld[MAX_ELD_BYTES]; + /** @latency_present: AV delay info from ELD, if found */ + bool latency_present[2]; + /** + * @video_latency: Video latency info from ELD, if found. + * [0]: progressive, [1]: interlaced + */ + int video_latency[2]; + /** + * @audio_latency: audio latency info from ELD, if found + * [0]: progressive, [1]: interlaced + */ + int audio_latency[2]; + + /** + * @ddc: associated ddc adapter. + * A connector usually has its associated ddc adapter. If a driver uses + * this field, then an appropriate symbolic link is created in connector + * sysfs directory to make it easy for the user to tell which i2c + * adapter is for a particular display. + * + * The field should be set by calling drm_connector_init_with_ddc(). + */ + struct i2c_adapter *ddc; + + /** + * @null_edid_counter: track sinks that give us all zeros for the EDID. + * Needed to workaround some HW bugs where we get all 0s + */ + int null_edid_counter; + + /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */ + unsigned bad_edid_counter; + + /** + * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used + * in Displayport compliance testing - Displayport Link CTS Core 1.2 + * rev1.1 4.2.2.6 + */ + bool edid_corrupt; + /** + * @real_edid_checksum: real edid checksum for corrupted edid block. + * Required in Displayport 1.4 compliance testing + * rev1.1 4.2.2.6 + */ + u8 real_edid_checksum; + + /** @debugfs_entry: debugfs directory for this connector */ + struct dentry *debugfs_entry; + + /** + * @state: + * + * Current atomic state for this connector. + * + * This is protected by &drm_mode_config.connection_mutex. Note that + * nonblocking atomic commits access the current connector state without + * taking locks. Either by going through the &struct drm_atomic_state + * pointers, see for_each_oldnew_connector_in_state(), + * for_each_old_connector_in_state() and + * for_each_new_connector_in_state(). Or through careful ordering of + * atomic commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. 
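+ *
+ * As a point of reference, a minimal sketch of walking the new
+ * connector states through a &struct drm_atomic_state as described
+ * above (the surrounding commit code and the state variable are
+ * assumed, not shown)::
+ *
+ *     struct drm_connector *connector;
+ *     struct drm_connector_state *new_conn_state;
+ *     int i;
+ *
+ *     for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ *             if (!new_conn_state->crtc)
+ *                     continue;
+ *             // program the encoder/bridge feeding this connector
+ *     }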
+ */ + struct drm_connector_state *state; + + /* DisplayID bits. FIXME: Extract into a substruct? */ + + /** + * @tile_blob_ptr: + * + * DRM blob property data for the tile property (used mostly by DP MST). + * This is meant for screens which are driven through separate display + * pipelines represented by &drm_crtc, which might not be running with + * genlocked clocks. For tiled panels which are genlocked, like + * dual-link LVDS or dual-link DSI, the driver should try to not expose + * the tiling and virtualize both &drm_crtc and &drm_plane if needed. + * + * This should only be updated by calling + * drm_connector_set_tile_property(). + */ + struct drm_property_blob *tile_blob_ptr; + + /** @has_tile: is this connector connected to a tiled monitor */ + bool has_tile; + /** @tile_group: tile group for the connected monitor */ + struct drm_tile_group *tile_group; + /** @tile_is_single_monitor: whether the tile is one monitor housing */ + bool tile_is_single_monitor; + + /** @num_h_tile: number of horizontal tiles in the tile group */ + /** @num_v_tile: number of vertical tiles in the tile group */ + uint8_t num_h_tile, num_v_tile; + /** @tile_h_loc: horizontal location of this tile */ + /** @tile_v_loc: vertical location of this tile */ + uint8_t tile_h_loc, tile_v_loc; + /** @tile_h_size: horizontal size of this tile. */ + /** @tile_v_size: vertical size of this tile. */ + uint16_t tile_h_size, tile_v_size; + + /** + * @free_node: + * + * List used only by &drm_connector_list_iter to be able to clean up a + * connector from any context, in conjunction with + * &drm_mode_config.connector_free_work. + */ + struct llist_node free_node; + + /** @hdr_sink_metadata: HDR Metadata Information read from sink */ + struct hdr_sink_metadata hdr_sink_metadata; +}; + +#define obj_to_connector(x) container_of(x, struct drm_connector, base) + +int drm_connector_init(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type); +int drm_connector_init_with_ddc(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type, + struct i2c_adapter *ddc); +void drm_connector_attach_edid_property(struct drm_connector *connector); +int drm_connector_register(struct drm_connector *connector); +void drm_connector_unregister(struct drm_connector *connector); +int drm_connector_attach_encoder(struct drm_connector *connector, + struct drm_encoder *encoder); + +void drm_connector_cleanup(struct drm_connector *connector); + +static inline unsigned int drm_connector_index(const struct drm_connector *connector) +{ + return connector->index; +} + +static inline u32 drm_connector_mask(const struct drm_connector *connector) +{ + return 1 << connector->index; +} + +/** + * drm_connector_lookup - lookup connector object + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: connector object id + * + * This function looks up the connector object specified by id + * add takes a reference to it. + */ +static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CONNECTOR); + return mo ? obj_to_connector(mo) : NULL; +} + +/** + * drm_connector_get - acquire a connector reference + * @connector: DRM connector + * + * This function increments the connector's refcount. 
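+ *
+ * An illustrative sketch of the usual get/put pairing, where the foo
+ * pointer stands in for an arbitrary driver-private structure that
+ * keeps the connector around past the current call chain::
+ *
+ *     drm_connector_get(connector);
+ *     foo->connector = connector;
+ *
+ *     // ... later, when foo no longer needs the connector:
+ *     drm_connector_put(foo->connector);
+ *     foo->connector = NULL;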
+ */ +static inline void drm_connector_get(struct drm_connector *connector) +{ + drm_mode_object_get(&connector->base); +} + +/** + * drm_connector_put - release a connector reference + * @connector: DRM connector + * + * This function decrements the connector's reference count and frees the + * object if the reference count drops to zero. + */ +static inline void drm_connector_put(struct drm_connector *connector) +{ + drm_mode_object_put(&connector->base); +} + +/** + * drm_connector_is_unregistered - has the connector been unregistered from + * userspace? + * @connector: DRM connector + * + * Checks whether or not @connector has been unregistered from userspace. + * + * Returns: + * True if the connector was unregistered, false if the connector is + * registered or has not yet been registered with userspace. + */ +static inline bool +drm_connector_is_unregistered(struct drm_connector *connector) +{ + return READ_ONCE(connector->registration_state) == + DRM_CONNECTOR_UNREGISTERED; +} + +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode); +const char *drm_get_connector_type_name(unsigned int connector_type); +const char *drm_get_connector_status_name(enum drm_connector_status status); +const char *drm_get_subpixel_order_name(enum subpixel_order order); +const char *drm_get_dpms_name(int val); +const char *drm_get_dvi_i_subconnector_name(int val); +const char *drm_get_dvi_i_select_name(int val); +const char *drm_get_tv_subconnector_name(int val); +const char *drm_get_tv_select_name(int val); +const char *drm_get_dp_subconnector_name(int val); +const char *drm_get_content_protection_name(int val); +const char *drm_get_hdcp_content_type_name(int val); + +int drm_mode_create_dvi_i_properties(struct drm_device *dev); +void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector); + +int drm_mode_create_tv_margin_properties(struct drm_device *dev); +int drm_mode_create_tv_properties(struct drm_device *dev, + unsigned int num_modes, + const char * const modes[]); +void drm_connector_attach_tv_margin_properties(struct drm_connector *conn); +int drm_mode_create_scaling_mode_property(struct drm_device *dev); +int drm_connector_attach_content_type_property(struct drm_connector *dev); +int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, + u32 scaling_mode_mask); +int drm_connector_attach_vrr_capable_property( + struct drm_connector *connector); +int drm_mode_create_aspect_ratio_property(struct drm_device *dev); +int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector); +int drm_mode_create_dp_colorspace_property(struct drm_connector *connector); +int drm_mode_create_content_type_property(struct drm_device *dev); +void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +int drm_mode_create_suggested_offset_properties(struct drm_device *dev); + +int drm_connector_set_path_property(struct drm_connector *connector, + const char *path); +int drm_connector_set_tile_property(struct drm_connector *connector); +int drm_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid); +void drm_connector_set_link_status_property(struct drm_connector *connector, + uint64_t link_status); +void drm_connector_set_vrr_capable_property( + struct drm_connector *connector, bool capable); +int drm_connector_set_panel_orientation( + struct drm_connector *connector, + enum drm_panel_orientation panel_orientation); +int 
drm_connector_set_panel_orientation_with_quirk( + struct drm_connector *connector, + enum drm_panel_orientation panel_orientation, + int width, int height); +int drm_connector_attach_max_bpc_property(struct drm_connector *connector, + int min, int max); + +/** + * struct drm_tile_group - Tile group metadata + * @refcount: reference count + * @dev: DRM device + * @id: tile group id exposed to userspace + * @group_data: Sink-private data identifying this group + * + * @group_data corresponds to displayid vend/prod/serial for external screens + * with an EDID. + */ +struct drm_tile_group { + struct kref refcount; + struct drm_device *dev; + int id; + u8 group_data[8]; +}; + +struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, + const char topology[8]); +struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, + const char topology[8]); +void drm_mode_put_tile_group(struct drm_device *dev, + struct drm_tile_group *tg); + +/** + * struct drm_connector_list_iter - connector_list iterator + * + * This iterator tracks state needed to be able to walk the connector_list + * within struct drm_mode_config. Only use together with + * drm_connector_list_iter_begin(), drm_connector_list_iter_end() and + * drm_connector_list_iter_next() respectively the convenience macro + * drm_for_each_connector_iter(). + */ +struct drm_connector_list_iter { +/* private: */ + struct drm_device *dev; + struct drm_connector *conn; +}; + +void drm_connector_list_iter_begin(struct drm_device *dev, + struct drm_connector_list_iter *iter); +struct drm_connector * +drm_connector_list_iter_next(struct drm_connector_list_iter *iter); +void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); + +bool drm_connector_has_possible_encoder(struct drm_connector *connector, + struct drm_encoder *encoder); + +/** + * drm_for_each_connector_iter - connector_list iterator macro + * @connector: &struct drm_connector pointer used as cursor + * @iter: &struct drm_connector_list_iter + * + * Note that @connector is only valid within the list body, if you want to use + * @connector after calling drm_connector_list_iter_end() then you need to grab + * your own reference first using drm_connector_get(). 
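+ *
+ * A minimal usage sketch, with dev being the &struct drm_device whose
+ * connectors are walked::
+ *
+ *     struct drm_connector_list_iter iter;
+ *     struct drm_connector *connector;
+ *
+ *     drm_connector_list_iter_begin(dev, &iter);
+ *     drm_for_each_connector_iter(connector, &iter) {
+ *             // connector is only valid inside the loop body; take a
+ *             // reference with drm_connector_get() if it must outlive
+ *             // the walk
+ *     }
+ *     drm_connector_list_iter_end(&iter);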
+ */ +#define drm_for_each_connector_iter(connector, iter) \ + while ((connector = drm_connector_list_iter_next(iter))) + +/** + * drm_connector_for_each_possible_encoder - iterate connector's possible encoders + * @connector: &struct drm_connector pointer + * @encoder: &struct drm_encoder pointer used as cursor + */ +#define drm_connector_for_each_possible_encoder(connector, encoder) \ + drm_for_each_encoder_mask(encoder, (connector)->dev, \ + (connector)->possible_encoders) + +#endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h new file mode 100644 index 000000000..59b51a09c --- /dev/null +++ b/include/drm/drm_crtc.h @@ -0,0 +1,1269 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __DRM_CRTC_H__ +#define __DRM_CRTC_H__ + +#include <linux/i2c.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/fb.h> +#include <linux/hdmi.h> +#include <linux/media-bus-format.h> +#include <uapi/drm/drm_mode.h> +#include <uapi/drm/drm_fourcc.h> +#include <drm/drm_modeset_lock.h> +#include <drm/drm_rect.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_modes.h> +#include <drm/drm_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_property.h> +#include <drm/drm_edid.h> +#include <drm/drm_plane.h> +#include <drm/drm_blend.h> +#include <drm/drm_color_mgmt.h> +#include <drm/drm_debugfs_crc.h> +#include <drm/drm_mode_config.h> + +struct drm_device; +struct drm_mode_set; +struct drm_file; +struct drm_clip_rect; +struct drm_printer; +struct drm_self_refresh_data; +struct device_node; +struct dma_fence; +struct edid; + +static inline int64_t U642I64(uint64_t val) +{ + return (int64_t)*((int64_t *)&val); +} +static inline uint64_t I642U64(int64_t val) +{ + return (uint64_t)*((uint64_t *)&val); +} + +struct drm_crtc; +struct drm_pending_vblank_event; +struct drm_plane; +struct drm_bridge; +struct drm_atomic_state; + +struct drm_crtc_helper_funcs; +struct drm_plane_helper_funcs; + +/** + * struct drm_crtc_state - mutable CRTC state + * + * Note that the distinction between @enable and @active is rather subtle: + * Flipping @active while @enable is set without changing anything else may + * never return in a failure from the &drm_mode_config_funcs.atomic_check + * callback. Userspace assumes that a DPMS On will always succeed. 
In other + * words: @enable controls resource assignment, @active controls the actual + * hardware state. + * + * The three booleans active_changed, connectors_changed and mode_changed are + * intended to indicate whether a full modeset is needed, rather than strictly + * describing what has changed in a commit. See also: + * drm_atomic_crtc_needs_modeset() + * + * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or + * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control + * state like @plane_mask so drivers not converted over to atomic helpers should + * not rely on these being accurate! + */ +struct drm_crtc_state { + /** @crtc: backpointer to the CRTC */ + struct drm_crtc *crtc; + + /** + * @enable: Whether the CRTC should be enabled, gates all other state. + * This controls reservations of shared resources. Actual hardware state + * is controlled by @active. + */ + bool enable; + + /** + * @active: Whether the CRTC is actively displaying (used for DPMS). + * Implies that @enable is set. The driver must not release any shared + * resources if @active is set to false but @enable still true, because + * userspace expects that a DPMS ON always succeeds. + * + * Hence drivers must not consult @active in their various + * &drm_mode_config_funcs.atomic_check callback to reject an atomic + * commit. They can consult it to aid in the computation of derived + * hardware state, since even in the DPMS OFF state the display hardware + * should be as much powered down as when the CRTC is completely + * disabled through setting @enable to false. + */ + bool active; + + /** + * @planes_changed: Planes on this crtc are updated. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. + */ + bool planes_changed : 1; + + /** + * @mode_changed: @mode or @enable has been changed. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + * + * Drivers are supposed to set this for any CRTC state changes that + * require a full modeset. They can also reset it to false if e.g. a + * @mode change can be done without a full modeset by only changing + * scaler settings. + */ + bool mode_changed : 1; + + /** + * @active_changed: @active has been toggled. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + */ + bool active_changed : 1; + + /** + * @connectors_changed: Connectors to this crtc have been updated, + * either in their state or routing. Used by the atomic + * helpers and drivers to steer the atomic commit control flow. See also + * drm_atomic_crtc_needs_modeset(). + * + * Drivers are supposed to set this as-needed from their own atomic + * check code, e.g. from &drm_encoder_helper_funcs.atomic_check + */ + bool connectors_changed : 1; + /** + * @zpos_changed: zpos values of planes on this crtc have been updated. + * Used by the atomic helpers and drivers to steer the atomic commit + * control flow. + */ + bool zpos_changed : 1; + /** + * @color_mgmt_changed: Color management properties have changed + * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and + * drivers to steer the atomic commit control flow. + */ + bool color_mgmt_changed : 1; + + /** + * @no_vblank: + * + * Reflects the ability of a CRTC to send VBLANK events. This state + * usually depends on the pipeline configuration. 
If set to true, DRM
+ * atomic helpers will send out a fake VBLANK event during display
+ * updates after all hardware changes have been committed. This is
+ * implemented in drm_atomic_helper_fake_vblank().
+ *
+ * One usage is for drivers and/or hardware without support for VBLANK
+ * interrupts. Such drivers typically do not initialize vblanking
+ * (i.e., they do not call drm_vblank_init() with the number of CRTCs).
+ * For CRTCs without initialized vblanking, this field is set to true in
+ * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be
+ * sent out on each update of the display pipeline by
+ * drm_atomic_helper_fake_vblank().
+ *
+ * Another usage is CRTCs feeding a writeback connector operating in
+ * oneshot mode. In this case the fake VBLANK event is only generated
+ * when a job is queued to the writeback connector, and we want the
+ * core to fake VBLANK events when this part of the pipeline hasn't
+ * changed but others have, or when the CRTC and connectors are being
+ * disabled.
+ *
+ * __drm_atomic_helper_crtc_duplicate_state() will not reset the value
+ * from the current state; the CRTC driver is then responsible for
+ * updating this field when needed.
+ *
+ * Note that the combination of &drm_crtc_state.event == NULL and
+ * &drm_crtc_state.no_vblank == true is valid and usually used when the
+ * writeback connector attached to the CRTC has a new job queued. In
+ * this case the driver will send the VBLANK event on its own when the
+ * writeback job is complete.
+ */
+ bool no_vblank : 1;
+
+ /**
+ * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
+ * this CRTC.
+ */
+ u32 plane_mask;
+
+ /**
+ * @connector_mask: Bitmask of drm_connector_mask(connector) of
+ * connectors attached to this CRTC.
+ */
+ u32 connector_mask;
+
+ /**
+ * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders
+ * attached to this CRTC.
+ */
+ u32 encoder_mask;
+
+ /**
+ * @adjusted_mode:
+ *
+ * Internal display timings which can be used by the driver to handle
+ * differences between the mode requested by userspace in @mode and what
+ * is actually programmed into the hardware.
+ *
+ * For drivers using &drm_bridge, this stores hardware display timings
+ * used between the CRTC and the first bridge. For other drivers, the
+ * meaning of the adjusted_mode field is purely driver implementation
+ * defined information, and will usually be used to store the hardware
+ * display timings used between the CRTC and encoder blocks.
+ */
+ struct drm_display_mode adjusted_mode;
+
+ /**
+ * @mode:
+ *
+ * Display timings requested by userspace. The driver should try to
+ * match the refresh rate as closely as possible (but note that it's
+ * undefined what exactly is close enough, e.g. some of the HDMI modes
+ * only differ in less than 1% of the refresh rate). The active width
+ * and height as observed by userspace for positioning planes must match
+ * exactly.
+ *
+ * For external connectors where the sink isn't fixed (unlike with a
+ * built-in panel), this mode here should match the physical mode on the
+ * wire down to the last detail (i.e. including sync polarities and
+ * everything).
+ */
+ struct drm_display_mode mode;
+
+ /**
+ * @mode_blob: &drm_property_blob for @mode, for exposing the mode to
+ * atomic userspace.
+ */
+ struct drm_property_blob *mode_blob;
+
+ /**
+ * @degamma_lut:
+ *
+ * Lookup table for converting framebuffer pixel data before applying the
+ * color conversion matrix @ctm. See drm_crtc_enable_color_mgmt().
The + * blob (if not NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *degamma_lut; + + /** + * @ctm: + * + * Color transformation matrix. See drm_crtc_enable_color_mgmt(). The + * blob (if not NULL) is a &struct drm_color_ctm. + */ + struct drm_property_blob *ctm; + + /** + * @gamma_lut: + * + * Lookup table for converting pixel data after the color conversion + * matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not + * NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *gamma_lut; + + /** + * @target_vblank: + * + * Target vertical blank period when a page flip + * should take effect. + */ + u32 target_vblank; + + /** + * @async_flip: + * + * This is set when DRM_MODE_PAGE_FLIP_ASYNC is set in the legacy + * PAGE_FLIP IOCTL. It's not wired up for the atomic IOCTL itself yet. + */ + bool async_flip; + + /** + * @vrr_enabled: + * + * Indicates if variable refresh rate should be enabled for the CRTC. + * Support for the requested vrr state will depend on driver and + * hardware capabiltiy - lacking support is not treated as failure. + */ + bool vrr_enabled; + + /** + * @self_refresh_active: + * + * Used by the self refresh helpers to denote when a self refresh + * transition is occurring. This will be set on enable/disable callbacks + * when self refresh is being enabled or disabled. In some cases, it may + * not be desirable to fully shut off the crtc during self refresh. + * CRTC's can inspect this flag and determine the best course of action. + */ + bool self_refresh_active; + + /** + * @event: + * + * Optional pointer to a DRM event to signal upon completion of the + * state update. The driver must send out the event when the atomic + * commit operation completes. There are two cases: + * + * - The event is for a CRTC which is being disabled through this + * atomic commit. In that case the event can be send out any time + * after the hardware has stopped scanning out the current + * framebuffers. It should contain the timestamp and counter for the + * last vblank before the display pipeline was shut off. The simplest + * way to achieve that is calling drm_crtc_send_vblank_event() + * somewhen after drm_crtc_vblank_off() has been called. + * + * - For a CRTC which is enabled at the end of the commit (even when it + * undergoes an full modeset) the vblank timestamp and counter must + * be for the vblank right before the first frame that scans out the + * new set of buffers. Again the event can only be sent out after the + * hardware has stopped scanning out the old buffers. + * + * - Events for disabled CRTCs are not allowed, and drivers can ignore + * that case. + * + * For very simple hardware without VBLANK interrupt, enabling + * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers + * send a fake VBLANK event at the end of the display update after all + * hardware changes have been applied. See + * drm_atomic_helper_fake_vblank(). + * + * For more complex hardware this + * can be handled by the drm_crtc_send_vblank_event() function, + * which the driver should call on the provided event upon completion of + * the atomic commit. Note that if the driver supports vblank signalling + * and timestamping the vblank counters and timestamps must agree with + * the ones returned from page flip events. 
With the current vblank + * helper infrastructure this can be achieved by holding a vblank + * reference while the page flip is pending, acquired through + * drm_crtc_vblank_get() and released with drm_crtc_vblank_put(). + * Drivers are free to implement their own vblank counter and timestamp + * tracking though, e.g. if they have accurate timestamp registers in + * hardware. + * + * For hardware which supports some means to synchronize vblank + * interrupt delivery with committing display state there's also + * drm_crtc_arm_vblank_event(). See the documentation of that function + * for a detailed discussion of the constraints it needs to be used + * safely. + * + * If the device can't notify of flip completion in a race-free way + * at all, then the event should be armed just after the page flip is + * committed. In the worst case the driver will send the event to + * userspace one frame too late. This doesn't allow for a real atomic + * update, but it should avoid tearing. + */ + struct drm_pending_vblank_event *event; + + /** + * @commit: + * + * This tracks how the commit for this update proceeds through the + * various phases. This is never cleared, except when we destroy the + * state, so that subsequent commits can synchronize with previous ones. + */ + struct drm_crtc_commit *commit; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; +}; + +/** + * struct drm_crtc_funcs - control CRTCs for a given device + * + * The drm_crtc_funcs structure is the central CRTC management structure + * in the DRM. Each CRTC controls one or more connectors (note that the name + * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc. + * connectors, not just CRTs). + * + * Each driver is responsible for filling out this structure at startup time, + * in addition to providing other modesetting features, like i2c and DDC + * bus accessors. + */ +struct drm_crtc_funcs { + /** + * @reset: + * + * Reset CRTC hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_crtc *crtc); + + /** + * @cursor_set: + * + * Update the cursor image. The cursor position is relative to the CRTC + * and can be partially or fully outside of the visible area. + * + * Note that contrary to all other KMS functions the legacy cursor entry + * points don't take a framebuffer object, but instead take directly a + * raw buffer object id from the driver's buffer manager (which is + * either GEM or TTM for current drivers). + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height); + + /** + * @cursor_set2: + * + * Update the cursor image, including hotspot information. The hotspot + * must not affect the cursor position in CRTC coordinates, but is only + * meant as a hint for virtualized display hardware to coordinate the + * guests and hosts cursor position. The cursor hotspot is relative to + * the cursor image. Otherwise this works exactly like @cursor_set. 
+ * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height, + int32_t hot_x, int32_t hot_y); + + /** + * @cursor_move: + * + * Update the cursor position. The cursor does not need to be visible + * when this hook is called. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*cursor_move)(struct drm_crtc *crtc, int x, int y); + + /** + * @gamma_set: + * + * Set gamma on the CRTC. + * + * This callback is optional. + * + * Atomic drivers who want to support gamma tables should implement the + * atomic color management support, enabled by calling + * drm_crtc_enable_color_mgmt(), which then supports the legacy gamma + * interface through the drm_atomic_helper_legacy_gamma_set() + * compatibility implementation. + */ + int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t size, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @destroy: + * + * Clean up CRTC resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged + * in DRM. + */ + void (*destroy)(struct drm_crtc *crtc); + + /** + * @set_config: + * + * This is the main legacy entry point to change the modeset state on a + * CRTC. All the details of the desired configuration are passed in a + * &struct drm_mode_set - see there for details. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_set_config() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_config)(struct drm_mode_set *set, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @page_flip: + * + * Legacy entry point to schedule a flip to the given framebuffer. + * + * Page flipping is a synchronization mechanism that replaces the frame + * buffer being scanned out by the CRTC with a new frame buffer during + * vertical blanking, avoiding tearing (except when requested otherwise + * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application + * requests a page flip the DRM core verifies that the new frame buffer + * is large enough to be scanned out by the CRTC in the currently + * configured mode and then calls this hook with a pointer to the new + * frame buffer. + * + * The driver must wait for any pending rendering to the new framebuffer + * to complete before executing the flip. It should also wait for any + * pending rendering from other drivers if the underlying buffer is a + * shared dma-buf. + * + * An application can request to be notified when the page flip has + * completed. The drm core will supply a &struct drm_event in the event + * parameter in this case. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the flip. Note that if + * the driver supports vblank signalling and timestamping the vblank + * counters and timestamps must agree with the ones returned from page + * flip events. 
With the current vblank helper infrastructure this can + * be achieved by holding a vblank reference while the page flip is + * pending, acquired through drm_crtc_vblank_get() and released with + * drm_crtc_vblank_put(). Drivers are free to implement their own vblank + * counter and timestamp tracking though, e.g. if they have accurate + * timestamp registers in hardware. + * + * This callback is optional. + * + * NOTE: + * + * Very early versions of the KMS ABI mandated that the driver must + * block (but not reject) any rendering to the old framebuffer until the + * flip operation has completed and the old framebuffer is no longer + * visible. This requirement has been lifted, and userspace is instead + * expected to request delivery of an event and wait with recycling old + * buffers until such has been received. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. Note that if a + * page flip operation is already pending the callback should return + * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode + * or just runtime disabled through DPMS respectively the new atomic + * "ACTIVE" state) should result in an -EINVAL error code. Note that + * drm_atomic_helper_page_flip() checks this already for atomic drivers. + */ + int (*page_flip)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @page_flip_target: + * + * Same as @page_flip but with an additional parameter specifying the + * absolute target vertical blank period (as reported by + * drm_crtc_vblank_count()) when the flip should take effect. + * + * Note that the core code calls drm_crtc_vblank_get before this entry + * point, and will call drm_crtc_vblank_put if this entry point returns + * any non-0 error code. It's the driver's responsibility to call + * drm_crtc_vblank_put after this entry point returns 0, typically when + * the flip completes. + */ + int (*page_flip_target)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, uint32_t target, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * CRTC. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_crtc *crtc, + struct drm_property *property, uint64_t val); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this CRTC and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_crtc_state should use + * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. 
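+ *
+ * For the subclassing case a minimal sketch could look as follows; all
+ * foo_* names are purely illustrative, struct foo_crtc_state is assumed
+ * to embed &struct drm_crtc_state as its base member and
+ * to_foo_crtc_state() is the matching container_of() wrapper::
+ *
+ *     static struct drm_crtc_state *foo_crtc_duplicate_state(struct drm_crtc *crtc)
+ *     {
+ *             struct foo_crtc_state *copy;
+ *
+ *             copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ *             if (!copy)
+ *                     return NULL;
+ *
+ *             __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);
+ *             copy->foo_private = to_foo_crtc_state(crtc->state)->foo_private;
+ *
+ *             return &copy->base;
+ *     }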
+ * + * It is an error to call this hook before &drm_crtc.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. + */ + void (*atomic_destroy_state)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_crtc_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which should never happen, the core only + * asks for properties attached to this CRTC). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ + int (*atomic_set_property)(struct drm_crtc *crtc, + struct drm_crtc_state *state, + struct drm_property *property, + uint64_t val); + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCRTC IOCTL. + * + * Do not call this function directly, use + * drm_atomic_crtc_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this CRTC). 
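+ *
+ * The decode side is usually a plain property-to-field mapping. A sketch
+ * with purely illustrative names (foo_crtc, to_foo_crtc(),
+ * to_foo_crtc_state() and the sharpness property are made up)::
+ *
+ *     static int foo_crtc_atomic_get_property(struct drm_crtc *crtc,
+ *                                             const struct drm_crtc_state *state,
+ *                                             struct drm_property *property,
+ *                                             uint64_t *val)
+ *     {
+ *             struct foo_crtc *foo = to_foo_crtc(crtc);
+ *
+ *             if (property == foo->sharpness_property) {
+ *                     *val = to_foo_crtc_state(state)->sharpness;
+ *                     return 0;
+ *             }
+ *
+ *             return -EINVAL;
+ *     }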
+ */ + int (*atomic_get_property)(struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, + uint64_t *val); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the crtc like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_crtc *crtc); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the crtc from + * @late_register. It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + */ + void (*early_unregister)(struct drm_crtc *crtc); + + /** + * @set_crc_source: + * + * Changes the source of CRC checksums of frames at the request of + * userspace, typically for testing purposes. The sources available are + * specific of each driver and a %NULL value indicates that CRC + * generation is to be switched off. + * + * When CRC generation is enabled, the driver should call + * drm_crtc_add_crc_entry() at each frame, providing any information + * that characterizes the frame contents in the crcN arguments, as + * provided from the configured source. Drivers must accept an "auto" + * source name that will select a default source for this CRTC. + * + * This may trigger an atomic modeset commit if necessary, to enable CRC + * generation. + * + * Note that "auto" can depend upon the current modeset configuration, + * e.g. it could pick an encoder or output specific CRC sampling point. + * + * This callback is optional if the driver does not support any CRC + * generation functionality. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_crc_source)(struct drm_crtc *crtc, const char *source); + + /** + * @verify_crc_source: + * + * verifies the source of CRC checksums of frames before setting the + * source for CRC and during crc open. Source parameter can be NULL + * while disabling crc source. + * + * This callback is optional if the driver does not support any CRC + * generation functionality. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*verify_crc_source)(struct drm_crtc *crtc, const char *source, + size_t *values_cnt); + /** + * @get_crc_sources: + * + * Driver callback for getting a list of all the available sources for + * CRC generation. This callback depends upon verify_crc_source, So + * verify_crc_source callback should be implemented before implementing + * this. Driver can pass full list of available crc sources, this + * callback does the verification on each crc-source before passing it + * to userspace. + * + * This callback is optional if the driver does not support exporting of + * possible CRC sources list. + * + * RETURNS: + * + * a constant character pointer to the list of all the available CRC + * sources. On failure driver should return NULL. count should be + * updated with number of sources in list. if zero we don't process any + * source from the list. 
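+ *
+ * A driver with a fixed set of sources can simply return a static table.
+ * A minimal sketch, with illustrative source names::
+ *
+ *     static const char * const foo_crc_sources[] = { "auto", "pipe" };
+ *
+ *     static const char *const *foo_get_crc_sources(struct drm_crtc *crtc,
+ *                                                   size_t *count)
+ *     {
+ *             *count = ARRAY_SIZE(foo_crc_sources);
+ *             return foo_crc_sources;
+ *     }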
+ */ + const char *const *(*get_crc_sources)(struct drm_crtc *crtc, + size_t *count); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_crtc_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_crtc_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_crtc_state *state); + + /** + * @get_vblank_counter: + * + * Driver callback for fetching a raw hardware vblank counter for the + * CRTC. It's meant to be used by new drivers as the replacement of + * &drm_driver.get_vblank_counter hook. + * + * This callback is optional. If a device doesn't have a hardware + * counter, the driver can simply leave the hook as NULL. The DRM core + * will account for missed vblank events while interrupts where disabled + * based on system timestamps. + * + * Wraparound handling and loss of events due to modesetting is dealt + * with in the DRM core code, as long as drivers call + * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or + * enabling a CRTC. + * + * See also &drm_device.vblank_disable_immediate and + * &drm_device.max_vblank_count. + * + * Returns: + * + * Raw vblank counter value. + */ + u32 (*get_vblank_counter)(struct drm_crtc *crtc); + + /** + * @enable_vblank: + * + * Enable vblank interrupts for the CRTC. It's meant to be used by + * new drivers as the replacement of &drm_driver.enable_vblank hook. + * + * Returns: + * + * Zero on success, appropriate errno if the vblank interrupt cannot + * be enabled. + */ + int (*enable_vblank)(struct drm_crtc *crtc); + + /** + * @disable_vblank: + * + * Disable vblank interrupts for the CRTC. It's meant to be used by + * new drivers as the replacement of &drm_driver.disable_vblank hook. + */ + void (*disable_vblank)(struct drm_crtc *crtc); + + /** + * @get_vblank_timestamp: + * + * Called by drm_get_last_vbltimestamp(). Should return a precise + * timestamp when the most recent vblank interval ended or will end. + * + * Specifically, the timestamp in @vblank_time should correspond as + * closely as possible to the time when the first video scanline of + * the video frame after the end of vblank will start scanning out, + * the time immediately after end of the vblank interval. If the + * @crtc is currently inside vblank, this will be a time in the future. + * If the @crtc is currently scanning out a frame, this will be the + * past start time of the current scanout. This is meant to adhere + * to the OpenML OML_sync_control extension specification. + * + * Parameters: + * + * crtc: + * CRTC for which timestamp should be returned. + * max_error: + * Maximum allowable timestamp error in nanoseconds. + * Implementation should strive to provide timestamp + * with an error of at most max_error nanoseconds. + * Returns true upper bound on error for timestamp. + * vblank_time: + * Target location for returned vblank timestamp. + * in_vblank_irq: + * True when called from drm_crtc_handle_vblank(). Some drivers + * need to apply some workarounds for gpu-specific vblank irq quirks + * if flag is set. + * + * Returns: + * + * True on success, false on failure, which means the core should + * fallback to a simple timestamp taken in drm_crtc_handle_vblank(). 
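+ *
+ * Drivers that implement &drm_crtc_helper_funcs.get_scanout_position can
+ * typically plug in the generic helper here, e.g. (sketch, other hooks
+ * omitted)::
+ *
+ *     static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *             // ... other hooks ...
+ *             .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ *     };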
+ */ + bool (*get_vblank_timestamp)(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); +}; + +/** + * struct drm_crtc - central CRTC control structure + * + * Each CRTC may have one or more connectors associated with it. This structure + * allows the CRTC to be controlled. + */ +struct drm_crtc { + /** @dev: parent DRM device */ + struct drm_device *dev; + /** @port: OF node used by drm_of_find_possible_crtcs(). */ + struct device_node *port; + /** + * @head: + * + * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list. + * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ + struct list_head head; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: + * + * This provides a read lock for the overall CRTC state (mode, dpms + * state, ...) and a write lock for everything which can be update + * without a full modeset (fb, cursor data, CRTC properties ...). A full + * modeset also need to grab &drm_mode_config.connection_mutex. + * + * For atomic drivers specifically this protects @state. + */ + struct drm_modeset_lock mutex; + + /** @base: base KMS object for ID tracking etc. */ + struct drm_mode_object base; + + /** + * @primary: + * Primary plane for this CRTC. Note that this is only + * relevant for legacy IOCTL, it specifies the plane implicitly used by + * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance + * beyond that. + */ + struct drm_plane *primary; + + /** + * @cursor: + * Cursor plane for this CRTC. Note that this is only relevant for + * legacy IOCTL, it specifies the plane implicitly used by the SETCURSOR + * and SETCURSOR2 IOCTLs. It does not have any significance + * beyond that. + */ + struct drm_plane *cursor; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the CRTC. + */ + unsigned index; + + /** + * @cursor_x: Current x position of the cursor, used for universal + * cursor planes because the SETCURSOR IOCTL only can update the + * framebuffer without supplying the coordinates. Drivers should not use + * this directly, atomic drivers should look at &drm_plane_state.crtc_x + * of the cursor plane instead. + */ + int cursor_x; + /** + * @cursor_y: Current y position of the cursor, used for universal + * cursor planes because the SETCURSOR IOCTL only can update the + * framebuffer without supplying the coordinates. Drivers should not use + * this directly, atomic drivers should look at &drm_plane_state.crtc_y + * of the cursor plane instead. + */ + int cursor_y; + + /** + * @enabled: + * + * Is this CRTC enabled? Should only be used by legacy drivers, atomic + * drivers should instead consult &drm_crtc_state.enable and + * &drm_crtc_state.active. Atomic drivers can update this by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + bool enabled; + + /** + * @mode: + * + * Current mode timings. Should only be used by legacy drivers, atomic + * drivers should instead consult &drm_crtc_state.mode. Atomic drivers + * can update this by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + struct drm_display_mode mode; + + /** + * @hwmode: + * + * Programmed mode in hw, after adjustments for encoders, crtc, panel + * scaling etc. Should only be used by legacy drivers, for high + * precision vblank timestamps in + * drm_crtc_vblank_helper_get_vblank_timestamp(). 
+ * + * Note that atomic drivers should not use this, but instead use + * &drm_crtc_state.adjusted_mode. And for high-precision timestamps + * drm_crtc_vblank_helper_get_vblank_timestamp() used + * &drm_vblank_crtc.hwmode, + * which is filled out by calling drm_calc_timestamping_constants(). + */ + struct drm_display_mode hwmode; + + /** + * @x: + * x position on screen. Should only be used by legacy drivers, atomic + * drivers should look at &drm_plane_state.crtc_x of the primary plane + * instead. Updated by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + int x; + /** + * @y: + * y position on screen. Should only be used by legacy drivers, atomic + * drivers should look at &drm_plane_state.crtc_y of the primary plane + * instead. Updated by calling + * drm_atomic_helper_update_legacy_modeset_state(). + */ + int y; + + /** @funcs: CRTC control functions */ + const struct drm_crtc_funcs *funcs; + + /** + * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up + * by calling drm_mode_crtc_set_gamma_size(). + */ + uint32_t gamma_size; + + /** + * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and + * GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size(). + */ + uint16_t *gamma_store; + + /** @helper_private: mid-layer private data */ + const struct drm_crtc_helper_funcs *helper_private; + + /** @properties: property tracking for this CRTC */ + struct drm_object_properties properties; + + /** + * @state: + * + * Current atomic state for this CRTC. + * + * This is protected by @mutex. Note that nonblocking atomic commits + * access the current CRTC state without taking locks. Either by going + * through the &struct drm_atomic_state pointers, see + * for_each_oldnew_crtc_in_state(), for_each_old_crtc_in_state() and + * for_each_new_crtc_in_state(). Or through careful ordering of atomic + * commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. + */ + struct drm_crtc_state *state; + + /** + * @commit_list: + * + * List of &drm_crtc_commit structures tracking pending commits. + * Protected by @commit_lock. This list holds its own full reference, + * as does the ongoing commit. + * + * "Note that the commit for a state change is also tracked in + * &drm_crtc_state.commit. For accessing the immediately preceding + * commit in an atomic update it is recommended to just use that + * pointer in the old CRTC state, since accessing that doesn't need + * any locking or list-walking. @commit_list should only be used to + * stall for framebuffer cleanup that's signalled through + * &drm_crtc_commit.cleanup_done." + */ + struct list_head commit_list; + + /** + * @commit_lock: + * + * Spinlock to protect @commit_list. + */ + spinlock_t commit_lock; + +#ifdef CONFIG_DEBUG_FS + /** + * @debugfs_entry: + * + * Debugfs directory for this CRTC. + */ + struct dentry *debugfs_entry; +#endif + + /** + * @crc: + * + * Configuration settings of CRC capture. + */ + struct drm_crtc_crc crc; + + /** + * @fence_context: + * + * timeline context used for fence operations. + */ + unsigned int fence_context; + + /** + * @fence_lock: + * + * spinlock to protect the fences in the fence_context. + */ + spinlock_t fence_lock; + /** + * @fence_seqno: + * + * Seqno variable used as monotonic counter for the fences + * created on the CRTC's timeline. + */ + unsigned long fence_seqno; + + /** + * @timeline_name: + * + * The name of the CRTC's fence timeline. 
+ */ + char timeline_name[32]; + + /** + * @self_refresh_data: Holds the state for the self refresh helpers + * + * Initialized via drm_self_refresh_helper_init(). + */ + struct drm_self_refresh_data *self_refresh_data; +}; + +/** + * struct drm_mode_set - new values for a CRTC config change + * @fb: framebuffer to use for new config + * @crtc: CRTC whose configuration we're about to change + * @mode: mode timings to use + * @x: position of this CRTC relative to @fb + * @y: position of this CRTC relative to @fb + * @connectors: array of connectors to drive with this CRTC if possible + * @num_connectors: size of @connectors array + * + * This represents a modeset configuration for the legacy SETCRTC ioctl and is + * also used internally. Atomic drivers instead use &drm_atomic_state. + */ +struct drm_mode_set { + struct drm_framebuffer *fb; + struct drm_crtc *crtc; + struct drm_display_mode *mode; + + uint32_t x; + uint32_t y; + + struct drm_connector **connectors; + size_t num_connectors; +}; + +#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) + +__printf(6, 7) +int drm_crtc_init_with_planes(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...); +void drm_crtc_cleanup(struct drm_crtc *crtc); + +/** + * drm_crtc_index - find the index of a registered CRTC + * @crtc: CRTC to find index for + * + * Given a registered CRTC, return the index of that CRTC within a DRM + * device's list of CRTCs. + */ +static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc) +{ + return crtc->index; +} + +/** + * drm_crtc_mask - find the mask of a registered CRTC + * @crtc: CRTC to find mask for + * + * Given a registered CRTC, return the mask bit of that CRTC for the + * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields. + */ +static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) +{ + return 1 << drm_crtc_index(crtc); +} + +int drm_mode_set_config_internal(struct drm_mode_set *set); +struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); + +/** + * drm_crtc_find - look up a CRTC object from its ID + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: &drm_mode_object ID + * + * This can be used to look up a CRTC from its userspace ID. Only used by + * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS + * userspace interface should be done using &drm_property. + */ +static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC); + return mo ? obj_to_crtc(mo) : NULL; +} + +/** + * drm_for_each_crtc - iterate over all CRTCs + * @crtc: a &struct drm_crtc as the loop cursor + * @dev: the &struct drm_device + * + * Iterate over all CRTCs of @dev. 
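+ *
+ * A minimal usage sketch, e.g. collecting the mask of every CRTC on @dev
+ * (locking and error handling elided)::
+ *
+ *      struct drm_crtc *crtc;
+ *      u32 all_crtcs = 0;
+ *
+ *      drm_for_each_crtc(crtc, dev)
+ *              all_crtcs |= drm_crtc_mask(crtc);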
+ */
+#define drm_for_each_crtc(crtc, dev) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
new file mode 100644
index 000000000..a6d520d5b
--- /dev/null
+++ b/include/drm/drm_crtc_helper.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The DRM mode setting helper functions are common code for drivers to use if
+ * they wish. Drivers are not forced to use this code in their
+ * implementations, but it would be useful if the code they do use at least
+ * provides a consistent interface and operation to userspace.
+ */
+
+#ifndef __DRM_CRTC_HELPER_H__
+#define __DRM_CRTC_HELPER_H__
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+
+#include <linux/fb.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
+
+void drm_helper_disable_unused_functions(struct drm_device *dev);
+int drm_crtc_helper_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx);
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
+
+int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+
+void drm_helper_resume_force_mode(struct drm_device *dev);
+int drm_helper_force_disable_all(struct drm_device *dev);
+
+#endif
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
new file mode 100644
index 000000000..40c34a5bf
--- /dev/null
+++ b/include/drm/drm_damage_helper.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ *
+ **************************************************************************/
+
+#ifndef DRM_DAMAGE_HELPER_H_
+#define DRM_DAMAGE_HELPER_H_
+
+#include <drm/drm_atomic_helper.h>
+
+/**
+ * drm_atomic_for_each_plane_damage - Iterator macro for plane damage.
+ * @iter: The iterator to advance.
+ * @rect: Returns a rectangle in fb coordinates clipped to the plane src.
+ *
+ * Note that if the first call to the iterator macro returns false then no
+ * plane update is needed. The iterator will return the full plane src when
+ * damage is not passed by user-space.
+ */
+#define drm_atomic_for_each_plane_damage(iter, rect) \
+ while (drm_atomic_helper_damage_iter_next(iter, rect))
+
+/**
+ * struct drm_atomic_helper_damage_iter - Closure structure for damage iterator.
+ *
+ * This structure tracks state needed to walk the list of plane damage clips.
+ */
+struct drm_atomic_helper_damage_iter {
+ /* private: Plane src in whole-number (integer) coordinates. */
+ struct drm_rect plane_src;
+ /* private: Rectangles in the plane damage blob. */
+ const struct drm_rect *clips;
+ /* private: Number of rectangles in the plane damage blob. */
+ uint32_t num_clips;
+ /* private: Current clip the iterator is advancing on. */
+ uint32_t curr_clip;
+ /* private: Whether a full plane update is needed. */
+ bool full_update;
+};
+
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state);
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips);
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *new_state);
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect);
+bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
+ struct drm_plane_state *state,
+ struct drm_rect *rect);
+
+/**
+ * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
+ * @state: Plane state.
+ *
+ * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect
+ * can be obtained by simply typecasting &drm_mode_rect.
+ * This is because both use signed 32-bit coordinates, and during
+ * drm_atomic_check_only() it is verified that the damage clips are inside
+ * the fb.
+ *
+ * Return: Clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_rect *
+drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_rect *)drm_plane_get_damage_clips(state);
+}
+
+#endif
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
new file mode 100644
index 000000000..2188dc839
--- /dev/null
+++ b/include/drm/drm_debugfs.h
@@ -0,0 +1,101 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_DEBUGFS_H_
+#define _DRM_DEBUGFS_H_
+
+#include <linux/types.h>
+#include <linux/seq_file.h>
+/**
+ * struct drm_info_list - debugfs info list entry
+ *
+ * This structure represents a debugfs file to be created by the drm
+ * core.
+ */
+struct drm_info_list {
+ /** @name: file name */
+ const char *name;
+ /**
+ * @show:
+ *
+ * Show callback. &seq_file->private will be set to the &struct
+ * drm_info_node corresponding to the instance of this info on a given
+ * &struct drm_minor.
+ */
+ int (*show)(struct seq_file*, void*);
+ /** @driver_features: Required driver features for this entry */
+ u32 driver_features;
+ /** @data: Driver-private data, should not be device-specific. */
+ void *data;
+};
+
+/**
+ * struct drm_info_node - Per-minor debugfs node structure
+ *
+ * This structure represents a debugfs file, as an instantiation of a &struct
+ * drm_info_list on a &struct drm_minor.
+ *
+ * FIXME:
+ *
+ * No, it doesn't make a whole lot of sense that we duplicate debugfs entries
+ * for both the render and the primary nodes, but that's how this has
+ * organically grown. It should probably be fixed, with a compatibility link,
+ * if needed.
+ */
+struct drm_info_node {
+ /** @minor: &struct drm_minor for this node. */
+ struct drm_minor *minor;
+ /** @info_ent: template for this node.
*/ + const struct drm_info_list *info_ent; + /* private: */ + struct list_head list; + struct dentry *dent; +}; + +#if defined(CONFIG_DEBUG_FS) +void drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor); +int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor); +#else +static inline void drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor) +{} + +static inline int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor) +{ + return 0; +} +#endif + +#endif /* _DRM_DEBUGFS_H_ */ diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h new file mode 100644 index 000000000..b225eeb30 --- /dev/null +++ b/include/drm/drm_debugfs_crc.h @@ -0,0 +1,74 @@ +/* + * Copyright © 2016 Collabora Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __DRM_DEBUGFS_CRC_H__ +#define __DRM_DEBUGFS_CRC_H__ + +#define DRM_MAX_CRC_NR 10 + +/** + * struct drm_crtc_crc_entry - entry describing a frame's content + * @has_frame_counter: whether the source was able to provide a frame number + * @frame: number of the frame this CRC is about, if @has_frame_counter is true + * @crc: array of values that characterize the frame + */ +struct drm_crtc_crc_entry { + bool has_frame_counter; + uint32_t frame; + uint32_t crcs[DRM_MAX_CRC_NR]; +}; + +#define DRM_CRC_ENTRIES_NR 128 + +/** + * struct drm_crtc_crc - data supporting CRC capture on a given CRTC + * @lock: protects the fields in this struct + * @source: name of the currently configured source of CRCs + * @opened: whether userspace has opened the data file for reading + * @overflow: whether an overflow occured. 
+ * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR + * @head: head of circular queue + * @tail: tail of circular queue + * @values_cnt: number of CRC values per entry, up to %DRM_MAX_CRC_NR + * @wq: workqueue used to synchronize reading and writing + */ +struct drm_crtc_crc { + spinlock_t lock; + const char *source; + bool opened, overflow; + struct drm_crtc_crc_entry *entries; + int head, tail; + size_t values_cnt; + wait_queue_head_t wq; +}; + +#if defined(CONFIG_DEBUG_FS) +int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, + uint32_t frame, uint32_t *crcs); +#else +static inline int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, + uint32_t frame, uint32_t *crcs) +{ + return -EINVAL; +} +#endif /* defined(CONFIG_DEBUG_FS) */ + +#endif /* __DRM_DEBUGFS_CRC_H__ */ diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h new file mode 100644 index 000000000..f4f68e7a9 --- /dev/null +++ b/include/drm/drm_device.h @@ -0,0 +1,384 @@ +#ifndef _DRM_DEVICE_H_ +#define _DRM_DEVICE_H_ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/idr.h> + +#include <drm/drm_hashtab.h> +#include <drm/drm_mode_config.h> + +struct drm_driver; +struct drm_minor; +struct drm_master; +struct drm_device_dma; +struct drm_vblank_crtc; +struct drm_sg_mem; +struct drm_local_map; +struct drm_vma_offset_manager; +struct drm_vram_mm; +struct drm_fb_helper; + +struct inode; + +struct pci_dev; +struct pci_controller; + + +/** + * enum drm_switch_power - power state of drm device + */ + +enum switch_power_state { + /** @DRM_SWITCH_POWER_ON: Power state is ON */ + DRM_SWITCH_POWER_ON = 0, + + /** @DRM_SWITCH_POWER_OFF: Power state is OFF */ + DRM_SWITCH_POWER_OFF = 1, + + /** @DRM_SWITCH_POWER_CHANGING: Power state is changing */ + DRM_SWITCH_POWER_CHANGING = 2, + + /** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */ + DRM_SWITCH_POWER_DYNAMIC_OFF = 3, +}; + +/** + * struct drm_device - DRM device structure + * + * This structure represent a complete card that + * may contain multiple heads. + */ +struct drm_device { + /** + * @legacy_dev_list: + * + * List of devices per driver for stealth attach cleanup + */ + struct list_head legacy_dev_list; + + /** @if_version: Highest interface version set */ + int if_version; + + /** @ref: Object ref-count */ + struct kref ref; + + /** @dev: Device structure of bus-device */ + struct device *dev; + + /** + * @managed: + * + * Managed resources linked to the lifetime of this &drm_device as + * tracked by @ref. + */ + struct { + /** @managed.resources: managed resources list */ + struct list_head resources; + /** @managed.final_kfree: pointer for final kfree() call */ + void *final_kfree; + /** @managed.lock: protects @managed.resources */ + spinlock_t lock; + } managed; + + /** @driver: DRM driver managing the device */ + struct drm_driver *driver; + + /** + * @dev_private: + * + * DRM driver private data. This is deprecated and should be left set to + * NULL. + * + * Instead of using this pointer it is recommended that drivers use + * devm_drm_dev_alloc() and embed struct &drm_device in their larger + * per-device structure. + */ + void *dev_private; + + /** @primary: Primary node */ + struct drm_minor *primary; + + /** @render: Render node */ + struct drm_minor *render; + + /** + * @registered: + * + * Internally used by drm_dev_register() and drm_connector_register(). + */ + bool registered; + + /** + * @master: + * + * Currently active master for this device. 
+ * Protected by &master_mutex + */ + struct drm_master *master; + + /** + * @driver_features: per-device driver features + * + * Drivers can clear specific flags here to disallow + * certain features on a per-device basis while still + * sharing a single &struct drm_driver instance across + * all devices. + */ + u32 driver_features; + + /** + * @unplugged: + * + * Flag to tell if the device has been unplugged. + * See drm_dev_enter() and drm_dev_is_unplugged(). + */ + bool unplugged; + + /** @anon_inode: inode for private address-space */ + struct inode *anon_inode; + + /** @unique: Unique name of the device */ + char *unique; + + /** + * @struct_mutex: + * + * Lock for others (not &drm_minor.master and &drm_file.is_master) + * + * WARNING: + * Only drivers annotated with DRIVER_LEGACY should be using this. + */ + struct mutex struct_mutex; + + /** + * @master_mutex: + * + * Lock for &drm_minor.master and &drm_file.is_master + */ + struct mutex master_mutex; + + /** + * @open_count: + * + * Usage counter for outstanding files open, + * protected by drm_global_mutex + */ + atomic_t open_count; + + /** @filelist_mutex: Protects @filelist. */ + struct mutex filelist_mutex; + /** + * @filelist: + * + * List of userspace clients, linked through &drm_file.lhead. + */ + struct list_head filelist; + + /** + * @filelist_internal: + * + * List of open DRM files for in-kernel clients. + * Protected by &filelist_mutex. + */ + struct list_head filelist_internal; + + /** + * @clientlist_mutex: + * + * Protects &clientlist access. + */ + struct mutex clientlist_mutex; + + /** + * @clientlist: + * + * List of in-kernel clients. Protected by &clientlist_mutex. + */ + struct list_head clientlist; + + /** + * @irq_enabled: + * + * Indicates that interrupt handling is enabled, specifically vblank + * handling. Drivers which don't use drm_irq_install() need to set this + * to true manually. + */ + bool irq_enabled; + + /** + * @irq: Used by the drm_irq_install() and drm_irq_unistall() helpers. + */ + int irq; + + /** + * @vblank_disable_immediate: + * + * If true, vblank interrupt will be disabled immediately when the + * refcount drops to zero, as opposed to via the vblank disable + * timer. + * + * This can be set to true it the hardware has a working vblank counter + * with high-precision timestamping (otherwise there are races) and the + * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off() + * appropriately. See also @max_vblank_count and + * &drm_crtc_funcs.get_vblank_counter. + */ + bool vblank_disable_immediate; + + /** + * @vblank: + * + * Array of vblank tracking structures, one per &struct drm_crtc. For + * historical reasons (vblank support predates kernel modesetting) this + * is free-standing and not part of &struct drm_crtc itself. It must be + * initialized explicitly by calling drm_vblank_init(). + */ + struct drm_vblank_crtc *vblank; + + /** + * @vblank_time_lock: + * + * Protects vblank count and time updates during vblank enable/disable + */ + spinlock_t vblank_time_lock; + /** + * @vbl_lock: Top-level vblank references lock, wraps the low-level + * @vblank_time_lock. + */ + spinlock_t vbl_lock; + + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers. This value +1 will result in a + * wrap-around of the vblank register. It is used by the vblank core to + * handle wrap-arounds. + * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. 
That approach is suffering from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * This is the statically configured device wide maximum. The driver + * can instead choose to use a runtime configurable per-crtc value + * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count + * must be left at zero. See drm_crtc_set_max_vblank_count() on how + * to use the per-crtc value. + * + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set. + */ + u32 max_vblank_count; + + /** @vblank_event_list: List of vblank events */ + struct list_head vblank_event_list; + + /** + * @event_lock: + * + * Protects @vblank_event_list and event delivery in + * general. See drm_send_event() and drm_send_event_locked(). + */ + spinlock_t event_lock; + + /** @agp: AGP data */ + struct drm_agp_head *agp; + + /** @pdev: PCI device structure */ + struct pci_dev *pdev; + +#ifdef __alpha__ + /** @hose: PCI hose, only used on ALPHA platforms. */ + struct pci_controller *hose; +#endif + /** @num_crtcs: Number of CRTCs on this device */ + unsigned int num_crtcs; + + /** @mode_config: Current mode config */ + struct drm_mode_config mode_config; + + /** @object_name_lock: GEM information */ + struct mutex object_name_lock; + + /** @object_name_idr: GEM information */ + struct idr object_name_idr; + + /** @vma_offset_manager: GEM information */ + struct drm_vma_offset_manager *vma_offset_manager; + + /** @vram_mm: VRAM MM memory manager */ + struct drm_vram_mm *vram_mm; + + /** + * @switch_power_state: + * + * Power state of the client. + * Used by drivers supporting the switcheroo driver. + * The state is maintained in the + * &vga_switcheroo_client_ops.set_gpu_state callback + */ + enum switch_power_state switch_power_state; + + /** + * @fb_helper: + * + * Pointer to the fbdev emulation structure. + * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini(). + */ + struct drm_fb_helper *fb_helper; + + /* Everything below here is for legacy driver, never use! */ + /* private: */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) + /* Context handle management - linked list of context handles */ + struct list_head ctxlist; + + /* Context handle management - mutex for &ctxlist */ + struct mutex ctxlist_mutex; + + /* Context handle management */ + struct idr ctx_idr; + + /* Memory management - linked list of regions */ + struct list_head maplist; + + /* Memory management - user token hash table for maps */ + struct drm_open_hash map_hash; + + /* Context handle management - list of vmas (for debugging) */ + struct list_head vmalist; + + /* Optional pointer for DMA support */ + struct drm_device_dma *dma; + + /* Context swapping flag */ + __volatile__ long context_flag; + + /* Last current context */ + int last_context; + + /* Lock for &buf_use and a few other things. */ + spinlock_t buf_lock; + + /* Usage counter for buffers in use -- cannot alloc */ + int buf_use; + + /* Buffer allocation in progress */ + atomic_t buf_alloc; + + struct { + int context; + struct drm_hw_lock *lock; + } sigdata; + + struct drm_local_map *agp_buffer_map; + unsigned int agp_buffer_token; + + /* Scatter gather memory */ + struct drm_sg_mem *sg; +#endif +}; + +#endif diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h new file mode 100644 index 000000000..77941efb5 --- /dev/null +++ b/include/drm/drm_displayid.h @@ -0,0 +1,103 @@ +/* + * Copyright © 2014 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef DRM_DISPLAYID_H +#define DRM_DISPLAYID_H + +#define DATA_BLOCK_PRODUCT_ID 0x00 +#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 +#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 +#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03 +#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04 +#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05 +#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06 +#define DATA_BLOCK_VESA_TIMING 0x07 +#define DATA_BLOCK_CEA_TIMING 0x08 +#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09 +#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a +#define DATA_BLOCK_GP_ASCII_STRING 0x0b +#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c +#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d +#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e +#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f +#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 +#define DATA_BLOCK_TILED_DISPLAY 0x12 +#define DATA_BLOCK_CTA 0x81 + +#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f + +#define PRODUCT_TYPE_EXTENSION 0 +#define PRODUCT_TYPE_TEST 1 +#define PRODUCT_TYPE_PANEL 2 +#define PRODUCT_TYPE_MONITOR 3 +#define PRODUCT_TYPE_TV 4 +#define PRODUCT_TYPE_REPEATER 5 +#define PRODUCT_TYPE_DIRECT_DRIVE 6 + +struct displayid_hdr { + u8 rev; + u8 bytes; + u8 prod_id; + u8 ext_count; +} __packed; + +struct displayid_block { + u8 tag; + u8 rev; + u8 num_bytes; +} __packed; + +struct displayid_tiled_block { + struct displayid_block base; + u8 tile_cap; + u8 topo[3]; + u8 tile_size[4]; + u8 tile_pixel_bezel[5]; + u8 topology_id[8]; +} __packed; + +struct displayid_detailed_timings_1 { + u8 pixel_clock[3]; + u8 flags; + u8 hactive[2]; + u8 hblank[2]; + u8 hsync[2]; + u8 hsw[2]; + u8 vactive[2]; + u8 vblank[2]; + u8 vsync[2]; + u8 vsw[2]; +} __packed; + +struct displayid_detailed_timing_block { + struct displayid_block base; + struct displayid_detailed_timings_1 timings[]; +}; + +#define for_each_displayid_db(displayid, block, idx, length) \ + for ((block) = (struct displayid_block *)&(displayid)[idx]; \ + (idx) + sizeof(struct displayid_block) <= (length) && \ + (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \ + (block)->num_bytes > 0; \ + (idx) += sizeof(struct displayid_block) + (block)->num_bytes, \ + (block) = (struct displayid_block *)&(displayid)[idx]) + +#endif diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h new file mode 100644 index 000000000..4c42db81f --- /dev/null +++ b/include/drm/drm_dp_dual_mode_helper.h @@ 
-0,0 +1,119 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef DRM_DP_DUAL_MODE_HELPER_H +#define DRM_DP_DUAL_MODE_HELPER_H + +#include <linux/types.h> + +/* + * Optional for type 1 DVI adaptors + * Mandatory for type 1 HDMI and type 2 adaptors + */ +#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */ +#define DP_DUAL_MODE_HDMI_ID_LEN 16 +/* + * Optional for type 1 adaptors + * Mandatory for type 2 adaptors + */ +#define DP_DUAL_MODE_ADAPTOR_ID 0x10 +#define DP_DUAL_MODE_REV_MASK 0x07 +#define DP_DUAL_MODE_REV_TYPE2 0x00 +#define DP_DUAL_MODE_TYPE_MASK 0xf0 +#define DP_DUAL_MODE_TYPE_TYPE2 0xa0 +/* This field is marked reserved in dual mode spec, used in LSPCON */ +#define DP_DUAL_MODE_TYPE_HAS_DPCD 0x08 +#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/ +#define DP_DUAL_IEEE_OUI_LEN 3 +#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */ +#define DP_DUAL_DEVICE_ID_LEN 6 +#define DP_DUAL_MODE_HARDWARE_REV 0x1a +#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b +#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c +#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d +#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e +#define DP_DUAL_MODE_TMDS_OEN 0x20 +#define DP_DUAL_MODE_TMDS_DISABLE 0x01 +#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21 +#define DP_DUAL_MODE_CEC_ENABLE 0x01 +#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22 + +/* LSPCON specific registers, defined by MCA */ +#define DP_DUAL_MODE_LSPCON_MODE_CHANGE 0x40 +#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41 +#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1 + +struct i2c_adapter; + +ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, + u8 offset, void *buffer, size_t size); +ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, + u8 offset, const void *buffer, size_t size); + +/** + * enum drm_lspcon_mode + * @DRM_LSPCON_MODE_INVALID: No LSPCON. + * @DRM_LSPCON_MODE_LS: Level shifter mode of LSPCON + * which drives DP++ to HDMI 1.4 conversion. + * @DRM_LSPCON_MODE_PCON: Protocol converter mode of LSPCON + * which drives DP++ to HDMI 2.0 active conversion. 
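+ *
+ * Illustrative sketch only (assumes @adapter is the adaptor's DDC/I2C
+ * adapter): a caller wanting HDMI 2.0 rates would typically query the
+ * current mode and, if needed, request protocol conversion via the
+ * helpers declared below::
+ *
+ *      enum drm_lspcon_mode mode;
+ *
+ *      if (drm_lspcon_get_mode(adapter, &mode) == 0 &&
+ *          mode != DRM_LSPCON_MODE_PCON)
+ *              drm_lspcon_set_mode(adapter, DRM_LSPCON_MODE_PCON);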
+ */ +enum drm_lspcon_mode { + DRM_LSPCON_MODE_INVALID, + DRM_LSPCON_MODE_LS, + DRM_LSPCON_MODE_PCON, +}; + +/** + * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor + * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor + * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor + * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor + * @DRM_DP_DUAL_MODE_LSPCON: Level shifter / protocol converter + */ +enum drm_dp_dual_mode_type { + DRM_DP_DUAL_MODE_NONE, + DRM_DP_DUAL_MODE_UNKNOWN, + DRM_DP_DUAL_MODE_TYPE1_DVI, + DRM_DP_DUAL_MODE_TYPE1_HDMI, + DRM_DP_DUAL_MODE_TYPE2_DVI, + DRM_DP_DUAL_MODE_TYPE2_HDMI, + DRM_DP_DUAL_MODE_LSPCON, +}; + +enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter); +int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter); +int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool *enabled); +int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, + struct i2c_adapter *adapter, bool enable); +const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); + +int drm_lspcon_get_mode(struct i2c_adapter *adapter, + enum drm_lspcon_mode *current_mode); +int drm_lspcon_set_mode(struct i2c_adapter *adapter, + enum drm_lspcon_mode reqd_mode); +#endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h new file mode 100644 index 000000000..881d329ce --- /dev/null +++ b/include/drm/drm_dp_helper.h @@ -0,0 +1,1856 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _DRM_DP_HELPER_H_ +#define _DRM_DP_HELPER_H_ + +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/types.h> +#include <drm/drm_connector.h> + +struct drm_device; + +/* + * Unless otherwise noted, all values are from the DP 1.1a spec. Note that + * DP and DPCD versions are independent. Differences from 1.0 are not noted, + * 1.0 devices basically don't exist in the wild. 
+ * + * Abbreviations, in chronological order: + * + * eDP: Embedded DisplayPort version 1 + * DPI: DisplayPort Interoperability Guideline v1.1a + * 1.2: DisplayPort 1.2 + * MST: Multistream Transport - part of DP 1.2a + * + * 1.2 formally includes both eDP and DPI definitions. + */ + +/* MSA (Main Stream Attribute) MISC bits (as MISC1<<8|MISC0) */ +#define DP_MSA_MISC_SYNC_CLOCK (1 << 0) +#define DP_MSA_MISC_INTERLACE_VTOTAL_EVEN (1 << 8) +#define DP_MSA_MISC_STEREO_NO_3D (0 << 9) +#define DP_MSA_MISC_STEREO_PROG_RIGHT_EYE (1 << 9) +#define DP_MSA_MISC_STEREO_PROG_LEFT_EYE (3 << 9) +/* bits per component for non-RAW */ +#define DP_MSA_MISC_6_BPC (0 << 5) +#define DP_MSA_MISC_8_BPC (1 << 5) +#define DP_MSA_MISC_10_BPC (2 << 5) +#define DP_MSA_MISC_12_BPC (3 << 5) +#define DP_MSA_MISC_16_BPC (4 << 5) +/* bits per component for RAW */ +#define DP_MSA_MISC_RAW_6_BPC (1 << 5) +#define DP_MSA_MISC_RAW_7_BPC (2 << 5) +#define DP_MSA_MISC_RAW_8_BPC (3 << 5) +#define DP_MSA_MISC_RAW_10_BPC (4 << 5) +#define DP_MSA_MISC_RAW_12_BPC (5 << 5) +#define DP_MSA_MISC_RAW_14_BPC (6 << 5) +#define DP_MSA_MISC_RAW_16_BPC (7 << 5) +/* pixel encoding/colorimetry format */ +#define _DP_MSA_MISC_COLOR(misc1_7, misc0_21, misc0_3, misc0_4) \ + ((misc1_7) << 15 | (misc0_4) << 4 | (misc0_3) << 3 | ((misc0_21) << 1)) +#define DP_MSA_MISC_COLOR_RGB _DP_MSA_MISC_COLOR(0, 0, 0, 0) +#define DP_MSA_MISC_COLOR_CEA_RGB _DP_MSA_MISC_COLOR(0, 0, 1, 0) +#define DP_MSA_MISC_COLOR_RGB_WIDE_FIXED _DP_MSA_MISC_COLOR(0, 3, 0, 0) +#define DP_MSA_MISC_COLOR_RGB_WIDE_FLOAT _DP_MSA_MISC_COLOR(0, 3, 0, 1) +#define DP_MSA_MISC_COLOR_Y_ONLY _DP_MSA_MISC_COLOR(1, 0, 0, 0) +#define DP_MSA_MISC_COLOR_RAW _DP_MSA_MISC_COLOR(1, 1, 0, 0) +#define DP_MSA_MISC_COLOR_YCBCR_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 1, 0) +#define DP_MSA_MISC_COLOR_YCBCR_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 1, 1) +#define DP_MSA_MISC_COLOR_YCBCR_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 1, 0) +#define DP_MSA_MISC_COLOR_YCBCR_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 1, 1) +#define DP_MSA_MISC_COLOR_XVYCC_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 0, 0) +#define DP_MSA_MISC_COLOR_XVYCC_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 0, 1) +#define DP_MSA_MISC_COLOR_XVYCC_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 0, 0) +#define DP_MSA_MISC_COLOR_XVYCC_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 0, 1) +#define DP_MSA_MISC_COLOR_OPRGB _DP_MSA_MISC_COLOR(0, 0, 1, 1) +#define DP_MSA_MISC_COLOR_DCI_P3 _DP_MSA_MISC_COLOR(0, 3, 1, 0) +#define DP_MSA_MISC_COLOR_COLOR_PROFILE _DP_MSA_MISC_COLOR(0, 3, 1, 1) +#define DP_MSA_MISC_COLOR_VSC_SDP (1 << 14) + +#define DP_AUX_MAX_PAYLOAD_BYTES 16 + +#define DP_AUX_I2C_WRITE 0x0 +#define DP_AUX_I2C_READ 0x1 +#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2 +#define DP_AUX_I2C_MOT 0x4 +#define DP_AUX_NATIVE_WRITE 0x8 +#define DP_AUX_NATIVE_READ 0x9 + +#define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0) +#define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0) +#define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0) +#define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0) + +#define DP_AUX_I2C_REPLY_ACK (0x0 << 2) +#define DP_AUX_I2C_REPLY_NACK (0x1 << 2) +#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2) +#define DP_AUX_I2C_REPLY_MASK (0x3 << 2) + +/* AUX CH addresses */ +/* DPCD */ +#define DP_DPCD_REV 0x000 +# define DP_DPCD_REV_10 0x10 +# define DP_DPCD_REV_11 0x11 +# define DP_DPCD_REV_12 0x12 +# define DP_DPCD_REV_13 0x13 +# define DP_DPCD_REV_14 0x14 + +#define DP_MAX_LINK_RATE 0x001 + +#define DP_MAX_LANE_COUNT 0x002 +# define DP_MAX_LANE_COUNT_MASK 0x1f +# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ +# define DP_ENHANCED_FRAME_CAP (1 << 7) + 
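+/*
+ * Illustrative sketch (hypothetical @dpcd buffer, e.g. filled from offset 0
+ * with drm_dp_dpcd_read()): the receiver capability fields above are plain
+ * bitfields, so lane count and enhanced framing support are extracted as:
+ *
+ *      u8 lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ *      bool enhanced = dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP;
+ */
+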
+#define DP_MAX_DOWNSPREAD 0x003 +# define DP_MAX_DOWNSPREAD_0_5 (1 << 0) +# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) +# define DP_TPS4_SUPPORTED (1 << 7) + +#define DP_NORP 0x004 + +#define DP_DOWNSTREAMPORT_PRESENT 0x005 +# define DP_DWN_STRM_PORT_PRESENT (1 << 0) +# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 +# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1) +# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1) +# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1) +# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1) +# define DP_FORMAT_CONVERSION (1 << 3) +# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ + +#define DP_MAIN_LINK_CHANNEL_CODING 0x006 +# define DP_CAP_ANSI_8B10B (1 << 0) + +#define DP_DOWN_STREAM_PORT_COUNT 0x007 +# define DP_PORT_COUNT_MASK 0x0f +# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ +# define DP_OUI_SUPPORT (1 << 7) + +#define DP_RECEIVE_PORT_0_CAP_0 0x008 +# define DP_LOCAL_EDID_PRESENT (1 << 1) +# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) + +#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 + +#define DP_RECEIVE_PORT_1_CAP_0 0x00a +#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b + +#define DP_I2C_SPEED_CAP 0x00c /* DPI */ +# define DP_I2C_SPEED_1K 0x01 +# define DP_I2C_SPEED_5K 0x02 +# define DP_I2C_SPEED_10K 0x04 +# define DP_I2C_SPEED_100K 0x08 +# define DP_I2C_SPEED_400K 0x10 +# define DP_I2C_SPEED_1M 0x20 + +#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ +# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0) +# define DP_FRAMING_CHANGE_CAP (1 << 1) +# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ + +#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ +# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */ +# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */ + +#define DP_ADAPTER_CAP 0x00f /* 1.2 */ +# define DP_FORCE_LOAD_SENSE_CAP (1 << 0) +# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1) + +#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */ +# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */ + +/* Multiple stream transport */ +#define DP_FAUX_CAP 0x020 /* 1.2 */ +# define DP_FAUX_CAP_1 (1 << 0) + +#define DP_MSTM_CAP 0x021 /* 1.2 */ +# define DP_MST_CAP (1 << 0) + +#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */ + +/* AV_SYNC_DATA_BLOCK 1.2 */ +#define DP_AV_GRANULARITY 0x023 +# define DP_AG_FACTOR_MASK (0xf << 0) +# define DP_AG_FACTOR_3MS (0 << 0) +# define DP_AG_FACTOR_2MS (1 << 0) +# define DP_AG_FACTOR_1MS (2 << 0) +# define DP_AG_FACTOR_500US (3 << 0) +# define DP_AG_FACTOR_200US (4 << 0) +# define DP_AG_FACTOR_100US (5 << 0) +# define DP_AG_FACTOR_10US (6 << 0) +# define DP_AG_FACTOR_1US (7 << 0) +# define DP_VG_FACTOR_MASK (0xf << 4) +# define DP_VG_FACTOR_3MS (0 << 4) +# define DP_VG_FACTOR_2MS (1 << 4) +# define DP_VG_FACTOR_1MS (2 << 4) +# define DP_VG_FACTOR_500US (3 << 4) +# define DP_VG_FACTOR_200US (4 << 4) +# define DP_VG_FACTOR_100US (5 << 4) + +#define DP_AUD_DEC_LAT0 0x024 +#define DP_AUD_DEC_LAT1 0x025 + +#define DP_AUD_PP_LAT0 0x026 +#define DP_AUD_PP_LAT1 0x027 + +#define DP_VID_INTER_LAT 0x028 + +#define DP_VID_PROG_LAT 0x029 + +#define DP_REP_LAT 0x02a + +#define DP_AUD_DEL_INS0 0x02b +#define DP_AUD_DEL_INS1 0x02c +#define DP_AUD_DEL_INS2 0x02d +/* End of AV_SYNC_DATA_BLOCK */ + +#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */ +# define DP_ALPM_CAP (1 << 0) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_CAP (1 << 0) + +#define DP_GUID 0x030 /* 1.2 */ + +#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */ +# define 
DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0) + +#define DP_DSC_REV 0x061 +# define DP_DSC_MAJOR_MASK (0xf << 0) +# define DP_DSC_MINOR_MASK (0xf << 4) +# define DP_DSC_MAJOR_SHIFT 0 +# define DP_DSC_MINOR_SHIFT 4 + +#define DP_DSC_RC_BUF_BLK_SIZE 0x062 +# define DP_DSC_RC_BUF_BLK_SIZE_1 0x0 +# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1 +# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2 +# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3 + +#define DP_DSC_RC_BUF_SIZE 0x063 + +#define DP_DSC_SLICE_CAP_1 0x064 +# define DP_DSC_1_PER_DP_DSC_SINK (1 << 0) +# define DP_DSC_2_PER_DP_DSC_SINK (1 << 1) +# define DP_DSC_4_PER_DP_DSC_SINK (1 << 3) +# define DP_DSC_6_PER_DP_DSC_SINK (1 << 4) +# define DP_DSC_8_PER_DP_DSC_SINK (1 << 5) +# define DP_DSC_10_PER_DP_DSC_SINK (1 << 6) +# define DP_DSC_12_PER_DP_DSC_SINK (1 << 7) + +#define DP_DSC_LINE_BUF_BIT_DEPTH 0x065 +# define DP_DSC_LINE_BUF_BIT_DEPTH_MASK (0xf << 0) +# define DP_DSC_LINE_BUF_BIT_DEPTH_9 0x0 +# define DP_DSC_LINE_BUF_BIT_DEPTH_10 0x1 +# define DP_DSC_LINE_BUF_BIT_DEPTH_11 0x2 +# define DP_DSC_LINE_BUF_BIT_DEPTH_12 0x3 +# define DP_DSC_LINE_BUF_BIT_DEPTH_13 0x4 +# define DP_DSC_LINE_BUF_BIT_DEPTH_14 0x5 +# define DP_DSC_LINE_BUF_BIT_DEPTH_15 0x6 +# define DP_DSC_LINE_BUF_BIT_DEPTH_16 0x7 +# define DP_DSC_LINE_BUF_BIT_DEPTH_8 0x8 + +#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066 +# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0) + +#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */ + +#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */ +# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0) +# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8 + +#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069 +# define DP_DSC_RGB (1 << 0) +# define DP_DSC_YCbCr444 (1 << 1) +# define DP_DSC_YCbCr422_Simple (1 << 2) +# define DP_DSC_YCbCr422_Native (1 << 3) +# define DP_DSC_YCbCr420_Native (1 << 4) + +#define DP_DSC_DEC_COLOR_DEPTH_CAP 0x06A +# define DP_DSC_8_BPC (1 << 1) +# define DP_DSC_10_BPC (1 << 2) +# define DP_DSC_12_BPC (1 << 3) + +#define DP_DSC_PEAK_THROUGHPUT 0x06B +# define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0) +# define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0 +# define DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED 0 +# define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_500 (4 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_550 (5 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_600 (6 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_650 (7 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_700 (8 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_750 (9 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_800 (10 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_850 (11 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0) /* 1.4a */ +# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4) +# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4 +# define DP_DSC_THROUGHPUT_MODE_1_UNSUPPORTED 0 +# define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_500 (4 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_550 (5 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_600 (6 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_650 (7 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_700 (8 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_750 (9 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_800 (10 << 4) +# define 
DP_DSC_THROUGHPUT_MODE_1_850 (11 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_170 (15 << 4) + +#define DP_DSC_MAX_SLICE_WIDTH 0x06C +#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560 +#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320 + +#define DP_DSC_SLICE_CAP_2 0x06D +# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0) +# define DP_DSC_20_PER_DP_DSC_SINK (1 << 1) +# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2) + +#define DP_DSC_BITS_PER_PIXEL_INC 0x06F +# define DP_DSC_BITS_PER_PIXEL_1_16 0x0 +# define DP_DSC_BITS_PER_PIXEL_1_8 0x1 +# define DP_DSC_BITS_PER_PIXEL_1_4 0x2 +# define DP_DSC_BITS_PER_PIXEL_1_2 0x3 +# define DP_DSC_BITS_PER_PIXEL_1 0x4 + +#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ +# define DP_PSR_IS_SUPPORTED 1 +# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */ +# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */ + +#define DP_PSR_CAPS 0x071 /* XXX 1.2? */ +# define DP_PSR_NO_TRAIN_ON_EXIT 1 +# define DP_PSR_SETUP_TIME_330 (0 << 1) +# define DP_PSR_SETUP_TIME_275 (1 << 1) +# define DP_PSR_SETUP_TIME_220 (2 << 1) +# define DP_PSR_SETUP_TIME_165 (3 << 1) +# define DP_PSR_SETUP_TIME_110 (4 << 1) +# define DP_PSR_SETUP_TIME_55 (5 << 1) +# define DP_PSR_SETUP_TIME_0 (6 << 1) +# define DP_PSR_SETUP_TIME_MASK (7 << 1) +# define DP_PSR_SETUP_TIME_SHIFT 1 +# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */ +# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */ + +#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */ +#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */ + +/* + * 0x80-0x8f describe downstream port capabilities, but there are two layouts + * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not, + * each port's descriptor is one byte wide. If it was set, each port's is + * four bytes wide, starting with the one byte from the base info. As of + * DP interop v1.1a only VGA defines additional detail. 
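+ *
+ * Hypothetical indexing sketch (@ds_caps is a caller-provided copy of the
+ * 16 bytes starting at 0x80, @detailed reflects
+ * DP_DETAILED_CAP_INFO_AVAILABLE, @port is the downstream port index)::
+ *
+ *	size_t stride = detailed ? 4 : 1;
+ *	u8 port_info = ds_caps[port * stride];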
+ */ + +/* offset 0 */ +#define DP_DOWNSTREAM_PORT_0 0x80 +# define DP_DS_PORT_TYPE_MASK (7 << 0) +# define DP_DS_PORT_TYPE_DP 0 +# define DP_DS_PORT_TYPE_VGA 1 +# define DP_DS_PORT_TYPE_DVI 2 +# define DP_DS_PORT_TYPE_HDMI 3 +# define DP_DS_PORT_TYPE_NON_EDID 4 +# define DP_DS_PORT_TYPE_DP_DUALMODE 5 +# define DP_DS_PORT_TYPE_WIRELESS 6 +# define DP_DS_PORT_HPD (1 << 3) +# define DP_DS_NON_EDID_MASK (0xf << 4) +# define DP_DS_NON_EDID_720x480i_60 (1 << 4) +# define DP_DS_NON_EDID_720x480i_50 (2 << 4) +# define DP_DS_NON_EDID_1920x1080i_60 (3 << 4) +# define DP_DS_NON_EDID_1920x1080i_50 (4 << 4) +# define DP_DS_NON_EDID_1280x720_60 (5 << 4) +# define DP_DS_NON_EDID_1280x720_50 (7 << 4) +/* offset 1 for VGA is maximum megapixels per second / 8 */ +/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */ +/* offset 2 for VGA/DVI/HDMI */ +# define DP_DS_MAX_BPC_MASK (3 << 0) +# define DP_DS_8BPC 0 +# define DP_DS_10BPC 1 +# define DP_DS_12BPC 2 +# define DP_DS_16BPC 3 +/* offset 3 for DVI */ +# define DP_DS_DVI_DUAL_LINK (1 << 1) +# define DP_DS_DVI_HIGH_COLOR_DEPTH (1 << 2) +/* offset 3 for HDMI */ +# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0) +# define DP_DS_HDMI_YCBCR422_PASS_THROUGH (1 << 1) +# define DP_DS_HDMI_YCBCR420_PASS_THROUGH (1 << 2) +# define DP_DS_HDMI_YCBCR444_TO_422_CONV (1 << 3) +# define DP_DS_HDMI_YCBCR444_TO_420_CONV (1 << 4) + +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +/* DP Forward error Correction Registers */ +#define DP_FEC_CAPABILITY 0x090 /* 1.4 */ +# define DP_FEC_CAPABLE (1 << 0) +# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1) +# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2) +# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3) + +/* DP Extended DSC Capabilities */ +#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */ +#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1 +#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 + +/* link configuration */ +#define DP_LINK_BW_SET 0x100 +# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ +# define DP_LINK_BW_1_62 0x06 +# define DP_LINK_BW_2_7 0x0a +# define DP_LINK_BW_5_4 0x14 /* 1.2 */ +# define DP_LINK_BW_8_1 0x1e /* 1.4 */ + +#define DP_LANE_COUNT_SET 0x101 +# define DP_LANE_COUNT_MASK 0x0f +# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) + +#define DP_TRAINING_PATTERN_SET 0x102 +# define DP_TRAINING_PATTERN_DISABLE 0 +# define DP_TRAINING_PATTERN_1 1 +# define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_3 3 /* 1.2 */ +# define DP_TRAINING_PATTERN_4 7 /* 1.4 */ +# define DP_TRAINING_PATTERN_MASK 0x3 +# define DP_TRAINING_PATTERN_MASK_1_4 0xf + +/* DPCD 1.1 only. 
For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */ +# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2) + +# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) +# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) + +# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) +# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) +# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) +# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) + +#define DP_TRAINING_LANE0_SET 0x103 +#define DP_TRAINING_LANE1_SET 0x104 +#define DP_TRAINING_LANE2_SET 0x105 +#define DP_TRAINING_LANE3_SET 0x106 + +# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 +# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 +# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0) + +# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3) + +# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 +# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) + +#define DP_DOWNSPREAD_CTRL 0x107 +# define DP_SPREAD_AMP_0_5 (1 << 4) +# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ + +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +# define DP_SET_ANSI_8B10B (1 << 0) + +#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ +/* bitmask as for DP_I2C_SPEED_CAP */ + +#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */ +# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0) +# define DP_FRAMING_CHANGE_ENABLE (1 << 1) +# define DP_PANEL_SELF_TEST_ENABLE (1 << 7) + +#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */ +#define DP_LINK_QUAL_LANE1_SET 0x10c +#define DP_LINK_QUAL_LANE2_SET 0x10d +#define DP_LINK_QUAL_LANE3_SET 0x10e +# define DP_LINK_QUAL_PATTERN_DISABLE 0 +# define DP_LINK_QUAL_PATTERN_D10_2 1 +# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2 +# define DP_LINK_QUAL_PATTERN_PRBS7 3 +# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4 +# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5 +# define DP_LINK_QUAL_PATTERN_MASK 7 + +#define DP_TRAINING_LANE0_1_SET2 0x10f +#define DP_TRAINING_LANE2_3_SET2 0x110 +# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0) +# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2) +# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4) +# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6) + +#define DP_MSTM_CTRL 0x111 /* 1.2 */ +# define DP_MST_EN (1 << 0) +# define DP_UP_REQ_EN (1 << 1) +# define DP_UPSTREAM_IS_SRC (1 << 2) + +#define DP_AUDIO_DELAY0 0x112 /* 1.2 */ +#define DP_AUDIO_DELAY1 0x113 +#define DP_AUDIO_DELAY2 0x114 + +#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */ +# define DP_LINK_RATE_SET_SHIFT 0 +# define DP_LINK_RATE_SET_MASK (7 << 0) + +#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */ +# define DP_ALPM_ENABLE (1 << 0) +# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0) +# define DP_IRQ_HPD_ENABLE (1 << 1) + +#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */ +# define DP_PWR_NOT_NEEDED (1 << 0) + +#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */ +# define DP_FEC_READY (1 << 0) +# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1) +# define 
DP_FEC_ERR_COUNT_DIS (0 << 1) +# define DP_FEC_UNCORR_BLK_ERROR_COUNT (1 << 1) +# define DP_FEC_CORR_BLK_ERROR_COUNT (2 << 1) +# define DP_FEC_BIT_ERROR_COUNT (3 << 1) +# define DP_FEC_LANE_SELECT_MASK (3 << 4) +# define DP_FEC_LANE_0_SELECT (0 << 4) +# define DP_FEC_LANE_1_SELECT (1 << 4) +# define DP_FEC_LANE_2_SELECT (2 << 4) +# define DP_FEC_LANE_3_SELECT (3 << 4) + +#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_VALID (1 << 0) + +#define DP_DSC_ENABLE 0x160 /* DP 1.4 */ +# define DP_DECOMPRESSION_EN (1 << 0) + +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ +# define DP_PSR_ENABLE (1 << 0) +# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) +# define DP_PSR_CRC_VERIFICATION (1 << 2) +# define DP_PSR_FRAME_CAPTURE (1 << 3) +# define DP_PSR_SELECTIVE_UPDATE (1 << 4) +# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) +# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ + +#define DP_ADAPTER_CTRL 0x1a0 +# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) + +#define DP_BRANCH_DEVICE_CTRL 0x1a1 +# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) + +#define DP_PAYLOAD_ALLOCATE_SET 0x1c0 +#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 +#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 + +#define DP_SINK_COUNT 0x200 +/* prior to 1.2 bit 7 was reserved mbz */ +# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) +# define DP_SINK_CP_READY (1 << 6) + +#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 +# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) +# define DP_AUTOMATED_TEST_REQUEST (1 << 1) +# define DP_CP_IRQ (1 << 2) +# define DP_MCCS_IRQ (1 << 3) +# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */ +# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */ +# define DP_SINK_SPECIFIC_IRQ (1 << 6) + +#define DP_LANE0_1_STATUS 0x202 +#define DP_LANE2_3_STATUS 0x203 +# define DP_LANE_CR_DONE (1 << 0) +# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) +# define DP_LANE_SYMBOL_LOCKED (1 << 2) + +#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \ + DP_LANE_CHANNEL_EQ_DONE | \ + DP_LANE_SYMBOL_LOCKED) + +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 + +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) + +#define DP_SINK_STATUS 0x205 + +#define DP_RECEIVE_PORT_0_STATUS (1 << 0) +#define DP_RECEIVE_PORT_1_STATUS (1 << 1) + +#define DP_ADJUST_REQUEST_LANE0_1 0x206 +#define DP_ADJUST_REQUEST_LANE2_3 0x207 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 + +#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c +# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03 +# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0 +# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c +# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2 +# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30 +# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4 +# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0 +# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6 + +#define DP_TEST_REQUEST 0x218 +# define DP_TEST_LINK_TRAINING (1 << 0) +# define DP_TEST_LINK_VIDEO_PATTERN (1 << 1) +# define DP_TEST_LINK_EDID_READ (1 << 2) +# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ +# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */ +# define 
DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */ +# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */ + +#define DP_TEST_LINK_RATE 0x219 +# define DP_LINK_RATE_162 (0x6) +# define DP_LINK_RATE_27 (0xa) + +#define DP_TEST_LANE_COUNT 0x220 + +#define DP_TEST_PATTERN 0x221 +# define DP_NO_TEST_PATTERN 0x0 +# define DP_COLOR_RAMP 0x1 +# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2 +# define DP_COLOR_SQUARE 0x3 + +#define DP_TEST_H_TOTAL_HI 0x222 +#define DP_TEST_H_TOTAL_LO 0x223 + +#define DP_TEST_V_TOTAL_HI 0x224 +#define DP_TEST_V_TOTAL_LO 0x225 + +#define DP_TEST_H_START_HI 0x226 +#define DP_TEST_H_START_LO 0x227 + +#define DP_TEST_V_START_HI 0x228 +#define DP_TEST_V_START_LO 0x229 + +#define DP_TEST_HSYNC_HI 0x22A +# define DP_TEST_HSYNC_POLARITY (1 << 7) +# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0) +#define DP_TEST_HSYNC_WIDTH_LO 0x22B + +#define DP_TEST_VSYNC_HI 0x22C +# define DP_TEST_VSYNC_POLARITY (1 << 7) +# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0) +#define DP_TEST_VSYNC_WIDTH_LO 0x22D + +#define DP_TEST_H_WIDTH_HI 0x22E +#define DP_TEST_H_WIDTH_LO 0x22F + +#define DP_TEST_V_HEIGHT_HI 0x230 +#define DP_TEST_V_HEIGHT_LO 0x231 + +#define DP_TEST_MISC0 0x232 +# define DP_TEST_SYNC_CLOCK (1 << 0) +# define DP_TEST_COLOR_FORMAT_MASK (3 << 1) +# define DP_TEST_COLOR_FORMAT_SHIFT 1 +# define DP_COLOR_FORMAT_RGB (0 << 1) +# define DP_COLOR_FORMAT_YCbCr422 (1 << 1) +# define DP_COLOR_FORMAT_YCbCr444 (2 << 1) +# define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3) +# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3) +# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4) +# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4) +# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4) +# define DP_TEST_BIT_DEPTH_MASK (7 << 5) +# define DP_TEST_BIT_DEPTH_SHIFT 5 +# define DP_TEST_BIT_DEPTH_6 (0 << 5) +# define DP_TEST_BIT_DEPTH_8 (1 << 5) +# define DP_TEST_BIT_DEPTH_10 (2 << 5) +# define DP_TEST_BIT_DEPTH_12 (3 << 5) +# define DP_TEST_BIT_DEPTH_16 (4 << 5) + +#define DP_TEST_MISC1 0x233 +# define DP_TEST_REFRESH_DENOMINATOR (1 << 0) +# define DP_TEST_INTERLACED (1 << 1) + +#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234 + +#define DP_TEST_MISC0 0x232 + +#define DP_TEST_CRC_R_CR 0x240 +#define DP_TEST_CRC_G_Y 0x242 +#define DP_TEST_CRC_B_CB 0x244 + +#define DP_TEST_SINK_MISC 0x246 +# define DP_TEST_CRC_SUPPORTED (1 << 5) +# define DP_TEST_COUNT_MASK 0xf + +#define DP_PHY_TEST_PATTERN 0x248 +# define DP_PHY_TEST_PATTERN_SEL_MASK 0x7 +# define DP_PHY_TEST_PATTERN_NONE 0x0 +# define DP_PHY_TEST_PATTERN_D10_2 0x1 +# define DP_PHY_TEST_PATTERN_ERROR_COUNT 0x2 +# define DP_PHY_TEST_PATTERN_PRBS7 0x3 +# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4 +# define DP_PHY_TEST_PATTERN_CP2520 0x5 + +#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A +#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250 +#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251 +#define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252 +#define DP_TEST_80BIT_CUSTOM_PATTERN_31_24 0x253 +#define DP_TEST_80BIT_CUSTOM_PATTERN_39_32 0x254 +#define DP_TEST_80BIT_CUSTOM_PATTERN_47_40 0x255 +#define DP_TEST_80BIT_CUSTOM_PATTERN_55_48 0x256 +#define DP_TEST_80BIT_CUSTOM_PATTERN_63_56 0x257 +#define DP_TEST_80BIT_CUSTOM_PATTERN_71_64 0x258 +#define DP_TEST_80BIT_CUSTOM_PATTERN_79_72 0x259 + +#define DP_TEST_RESPONSE 0x260 +# define DP_TEST_ACK (1 << 0) +# define DP_TEST_NAK (1 << 1) +# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) + +#define DP_TEST_EDID_CHECKSUM 0x261 + +#define DP_TEST_SINK 0x270 +# define DP_TEST_SINK_START (1 << 0) +#define DP_TEST_AUDIO_MODE 0x271 +#define 
DP_TEST_AUDIO_PATTERN_TYPE 0x272 +#define DP_TEST_AUDIO_PERIOD_CH1 0x273 +#define DP_TEST_AUDIO_PERIOD_CH2 0x274 +#define DP_TEST_AUDIO_PERIOD_CH3 0x275 +#define DP_TEST_AUDIO_PERIOD_CH4 0x276 +#define DP_TEST_AUDIO_PERIOD_CH5 0x277 +#define DP_TEST_AUDIO_PERIOD_CH6 0x278 +#define DP_TEST_AUDIO_PERIOD_CH7 0x279 +#define DP_TEST_AUDIO_PERIOD_CH8 0x27A + +#define DP_FEC_STATUS 0x280 /* 1.4 */ +# define DP_FEC_DECODE_EN_DETECTED (1 << 0) +# define DP_FEC_DECODE_DIS_DETECTED (1 << 1) + +#define DP_FEC_ERROR_COUNT_LSB 0x0281 /* 1.4 */ + +#define DP_FEC_ERROR_COUNT_MSB 0x0282 /* 1.4 */ +# define DP_FEC_ERROR_COUNT_MASK 0x7F +# define DP_FEC_ERR_COUNT_VALID (1 << 7) + +#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ +# define DP_PAYLOAD_TABLE_UPDATED (1 << 0) +# define DP_PAYLOAD_ACT_HANDLED (1 << 1) + +#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */ +/* up to ID_SLOT_63 at 0x2ff */ + +#define DP_SOURCE_OUI 0x300 +#define DP_SINK_OUI 0x400 +#define DP_BRANCH_OUI 0x500 +#define DP_BRANCH_ID 0x503 +#define DP_BRANCH_REVISION_START 0x509 +#define DP_BRANCH_HW_REV 0x509 +#define DP_BRANCH_SW_REV 0x50A + +#define DP_SET_POWER 0x600 +# define DP_SET_POWER_D0 0x1 +# define DP_SET_POWER_D3 0x2 +# define DP_SET_POWER_MASK 0x3 +# define DP_SET_POWER_D3_AUX_ON 0x5 + +#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ +# define DP_EDP_11 0x00 +# define DP_EDP_12 0x01 +# define DP_EDP_13 0x02 +# define DP_EDP_14 0x03 +# define DP_EDP_14a 0x04 /* eDP 1.4a */ +# define DP_EDP_14b 0x05 /* eDP 1.4b */ + +#define DP_EDP_GENERAL_CAP_1 0x701 +# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2) +# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3) +# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4) +# define DP_EDP_FRC_ENABLE_CAP (1 << 5) +# define DP_EDP_COLOR_ENGINE_CAP (1 << 6) +# define DP_EDP_SET_POWER_CAP (1 << 7) + +#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 +# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2) +# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5) +# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7) + +#define DP_EDP_GENERAL_CAP_2 0x703 +# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) + +#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ +# define DP_EDP_X_REGION_CAP_MASK (0xf << 0) +# define DP_EDP_X_REGION_CAP_SHIFT 0 +# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4) +# define DP_EDP_Y_REGION_CAP_SHIFT 4 + +#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 +# define DP_EDP_BACKLIGHT_ENABLE (1 << 0) +# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1) +# define DP_EDP_FRC_ENABLE (1 << 2) +# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7) + +#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 +# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3) +# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4) +# define 
DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5) +# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */ + +#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 +#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 + +#define DP_EDP_PWMGEN_BIT_COUNT 0x724 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726 +# define DP_EDP_PWMGEN_BIT_COUNT_MASK (0x1f << 0) + +#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727 + +#define DP_EDP_BACKLIGHT_FREQ_SET 0x728 +# define DP_EDP_BACKLIGHT_FREQ_BASE_KHZ 27000 + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f + +#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732 +#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733 + +#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ +#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ + +#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */ + +#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */ +/* 0-5 sink count */ +# define DP_SINK_COUNT_CP_READY (1 << 6) + +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */ + +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */ +# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0) +# define DP_LOCK_ACQUISITION_REQUEST (1 << 1) +# define DP_CEC_IRQ (1 << 2) + +#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */ + +#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ +# define DP_PSR_LINK_CRC_ERROR (1 << 0) +# define DP_PSR_RFB_STORAGE_ERROR (1 << 1) +# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */ + +#define DP_PSR_ESI 0x2007 /* XXX 1.2? */ +# define DP_PSR_CAPS_CHANGE (1 << 0) + +#define DP_PSR_STATUS 0x2008 /* XXX 1.2? 
*/ +# define DP_PSR_SINK_INACTIVE 0 +# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 +# define DP_PSR_SINK_ACTIVE_RFB 2 +# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3 +# define DP_PSR_SINK_ACTIVE_RESYNC 4 +# define DP_PSR_SINK_INTERNAL_ERROR 7 +# define DP_PSR_SINK_STATE_MASK 0x07 + +#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* edp 1.4 */ +# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0) +# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0 +# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4) +# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4 + +#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */ +# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */ +# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */ +# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */ +# define DP_SU_VALID (1 << 3) /* eDP 1.4 */ +# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */ +# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */ +# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */ + +#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ +# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) + +#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */ +#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */ +#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ +#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ + +#define DP_DP13_DPCD_REV 0x2200 +#define DP_DP13_MAX_LINK_RATE 0x2201 + +#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */ +# define DP_GTC_CAP (1 << 0) /* DP 1.3 */ +# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */ +# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */ +# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */ +# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */ +# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */ + +/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */ +#define DP_CEC_TUNNELING_CAPABILITY 0x3000 +# define DP_CEC_TUNNELING_CAPABLE (1 << 0) +# define DP_CEC_SNOOPING_CAPABLE (1 << 1) +# define DP_CEC_MULTIPLE_LA_CAPABLE (1 << 2) + +#define DP_CEC_TUNNELING_CONTROL 0x3001 +# define DP_CEC_TUNNELING_ENABLE (1 << 0) +# define DP_CEC_SNOOPING_ENABLE (1 << 1) + +#define DP_CEC_RX_MESSAGE_INFO 0x3002 +# define DP_CEC_RX_MESSAGE_LEN_MASK (0xf << 0) +# define DP_CEC_RX_MESSAGE_LEN_SHIFT 0 +# define DP_CEC_RX_MESSAGE_HPD_STATE (1 << 4) +# define DP_CEC_RX_MESSAGE_HPD_LOST (1 << 5) +# define DP_CEC_RX_MESSAGE_ACKED (1 << 6) +# define DP_CEC_RX_MESSAGE_ENDED (1 << 7) + +#define DP_CEC_TX_MESSAGE_INFO 0x3003 +# define DP_CEC_TX_MESSAGE_LEN_MASK (0xf << 0) +# define DP_CEC_TX_MESSAGE_LEN_SHIFT 0 +# define DP_CEC_TX_RETRY_COUNT_MASK (0x7 << 4) +# define DP_CEC_TX_RETRY_COUNT_SHIFT 4 +# define DP_CEC_TX_MESSAGE_SEND (1 << 7) + +#define DP_CEC_TUNNELING_IRQ_FLAGS 0x3004 +# define DP_CEC_RX_MESSAGE_INFO_VALID (1 << 0) +# define DP_CEC_RX_MESSAGE_OVERFLOW (1 << 1) +# define DP_CEC_TX_MESSAGE_SENT (1 << 4) +# define DP_CEC_TX_LINE_ERROR (1 << 5) +# define DP_CEC_TX_ADDRESS_NACK_ERROR (1 << 6) +# define DP_CEC_TX_DATA_NACK_ERROR (1 << 7) + +#define DP_CEC_LOGICAL_ADDRESS_MASK 0x300E /* 0x300F word */ +# define DP_CEC_LOGICAL_ADDRESS_0 (1 << 0) +# define DP_CEC_LOGICAL_ADDRESS_1 (1 << 1) +# define DP_CEC_LOGICAL_ADDRESS_2 (1 << 2) +# define DP_CEC_LOGICAL_ADDRESS_3 (1 << 3) +# define DP_CEC_LOGICAL_ADDRESS_4 (1 << 4) +# define DP_CEC_LOGICAL_ADDRESS_5 
(1 << 5) +# define DP_CEC_LOGICAL_ADDRESS_6 (1 << 6) +# define DP_CEC_LOGICAL_ADDRESS_7 (1 << 7) +#define DP_CEC_LOGICAL_ADDRESS_MASK_2 0x300F /* 0x300E word */ +# define DP_CEC_LOGICAL_ADDRESS_8 (1 << 0) +# define DP_CEC_LOGICAL_ADDRESS_9 (1 << 1) +# define DP_CEC_LOGICAL_ADDRESS_10 (1 << 2) +# define DP_CEC_LOGICAL_ADDRESS_11 (1 << 3) +# define DP_CEC_LOGICAL_ADDRESS_12 (1 << 4) +# define DP_CEC_LOGICAL_ADDRESS_13 (1 << 5) +# define DP_CEC_LOGICAL_ADDRESS_14 (1 << 6) +# define DP_CEC_LOGICAL_ADDRESS_15 (1 << 7) + +#define DP_CEC_RX_MESSAGE_BUFFER 0x3010 +#define DP_CEC_TX_MESSAGE_BUFFER 0x3020 +#define DP_CEC_MESSAGE_BUFFER_LENGTH 0x10 + +#define DP_PROTOCOL_CONVERTER_CONTROL_0 0x3050 /* DP 1.3 */ +# define DP_HDMI_DVI_OUTPUT_CONFIG (1 << 0) /* DP 1.3 */ +#define DP_PROTOCOL_CONVERTER_CONTROL_1 0x3051 /* DP 1.3 */ +# define DP_CONVERSION_TO_YCBCR420_ENABLE (1 << 0) /* DP 1.3 */ +# define DP_HDMI_EDID_PROCESSING_DISABLE (1 << 1) /* DP 1.4 */ +# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE (1 << 2) /* DP 1.4 */ +# define DP_HDMI_FORCE_SCRAMBLING (1 << 3) /* DP 1.4 */ +#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */ +# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */ + +#define DP_AUX_HDCP_BKSV 0x68000 +#define DP_AUX_HDCP_RI_PRIME 0x68005 +#define DP_AUX_HDCP_AKSV 0x68007 +#define DP_AUX_HDCP_AN 0x6800C +#define DP_AUX_HDCP_V_PRIME(h) (0x68014 + h * 4) +#define DP_AUX_HDCP_BCAPS 0x68028 +# define DP_BCAPS_REPEATER_PRESENT BIT(1) +# define DP_BCAPS_HDCP_CAPABLE BIT(0) +#define DP_AUX_HDCP_BSTATUS 0x68029 +# define DP_BSTATUS_REAUTH_REQ BIT(3) +# define DP_BSTATUS_LINK_FAILURE BIT(2) +# define DP_BSTATUS_R0_PRIME_READY BIT(1) +# define DP_BSTATUS_READY BIT(0) +#define DP_AUX_HDCP_BINFO 0x6802A +#define DP_AUX_HDCP_KSV_FIFO 0x6802C +#define DP_AUX_HDCP_AINFO 0x6803B + +/* DP HDCP2.2 parameter offsets in DPCD address space */ +#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000 +#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008 +#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B +#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215 +#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D +#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220 +#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0 +#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0 +#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0 +#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0 +#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0 +#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8 +#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318 +#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328 +#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330 +#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332 +#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335 +#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345 +#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0 +#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0 +#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3 +#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5 +#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473 +#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493 +#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494 +#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518 + +/* Link Training (LT)-tunable PHY Repeaters */ +#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */ +#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */ +#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */ +#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */ +#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */ +#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */ +#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 
0xf0005 /* 1.4a */ +#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */ +#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */ +#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */ +#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */ +#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */ +#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */ +#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */ +#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */ +#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */ +#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */ +#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */ +#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */ +#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */ +#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */ +#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */ +#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */ + +/* Repeater modes */ +#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */ +#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */ + +/* DP HDCP message start offsets in DPCD address space */ +#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET +#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET +#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET +#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET +#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET +#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \ + DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET +#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET +#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET +#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET +#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET +#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET +#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET +#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET + +#define HDCP_2_2_DP_RXSTATUS_LEN 1 +#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0)) +#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1)) +#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2)) +#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) +#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4)) + +/* DP 1.2 Sideband message defines */ +/* peer device type - DP 1.2a Table 2-92 */ +#define DP_PEER_DEVICE_NONE 0x0 +#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1 +#define DP_PEER_DEVICE_MST_BRANCHING 0x2 +#define DP_PEER_DEVICE_SST_SINK 0x3 +#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4 + +/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */ +#define DP_GET_MSG_TRANSACTION_VERSION 0x00 /* DP 1.3 */ +#define DP_LINK_ADDRESS 0x01 +#define DP_CONNECTION_STATUS_NOTIFY 0x02 +#define DP_ENUM_PATH_RESOURCES 0x10 +#define DP_ALLOCATE_PAYLOAD 0x11 +#define DP_QUERY_PAYLOAD 0x12 +#define DP_RESOURCE_STATUS_NOTIFY 0x13 +#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14 +#define DP_REMOTE_DPCD_READ 0x20 +#define DP_REMOTE_DPCD_WRITE 0x21 +#define DP_REMOTE_I2C_READ 0x22 +#define DP_REMOTE_I2C_WRITE 0x23 +#define DP_POWER_UP_PHY 0x24 
+#define DP_POWER_DOWN_PHY 0x25 +#define DP_SINK_EVENT_NOTIFY 0x30 +#define DP_QUERY_STREAM_ENC_STATUS 0x38 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST 0 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE 1 +#define DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE 2 + +/* DP 1.2 MST sideband reply types */ +#define DP_SIDEBAND_REPLY_ACK 0x00 +#define DP_SIDEBAND_REPLY_NAK 0x01 + +/* DP 1.2 MST sideband nak reasons - table 2.84 */ +#define DP_NAK_WRITE_FAILURE 0x01 +#define DP_NAK_INVALID_READ 0x02 +#define DP_NAK_CRC_FAILURE 0x03 +#define DP_NAK_BAD_PARAM 0x04 +#define DP_NAK_DEFER 0x05 +#define DP_NAK_LINK_FAILURE 0x06 +#define DP_NAK_NO_RESOURCES 0x07 +#define DP_NAK_DPCD_FAIL 0x08 +#define DP_NAK_I2C_NAK 0x09 +#define DP_NAK_ALLOCATE_FAIL 0x0a + +#define MODE_I2C_START 1 +#define MODE_I2C_WRITE 2 +#define MODE_I2C_READ 4 +#define MODE_I2C_STOP 8 + +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + +#define DP_LINK_CONSTANT_N_VALUE 0x8000 +#define DP_LINK_STATUS_SIZE 6 +bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane); +u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE], + unsigned int lane); + +#define DP_BRANCH_OUI_HEADER_SIZE 0xc +#define DP_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */ +#define EDP_PSR_RECEIVER_CAP_SIZE 2 +#define EDP_DISPLAY_CTL_CAP_SIZE 3 + +void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); + +u8 drm_dp_link_rate_to_bw_code(int link_rate); +int drm_dp_bw_code_to_link_rate(u8 link_bw); + +#define DP_SDP_AUDIO_TIMESTAMP 0x01 +#define DP_SDP_AUDIO_STREAM 0x02 +#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */ +#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */ +#define DP_SDP_ISRC 0x06 /* DP 1.2 */ +#define DP_SDP_VSC 0x07 /* DP 1.2 */ +#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */ +#define DP_SDP_PPS 0x10 /* DP 1.4 */ +#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */ +#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */ +/* 0x80+ CEA-861 infoframe types */ + +/** + * struct dp_sdp_header - DP secondary data packet header + * @HB0: Secondary Data Packet ID + * @HB1: Secondary Data Packet Type + * @HB2: Secondary Data Packet Specific header, Byte 0 + * @HB3: Secondary Data packet Specific header, Byte 1 + */ +struct dp_sdp_header { + u8 HB0; + u8 HB1; + u8 HB2; + u8 HB3; +} __packed; + +#define EDP_SDP_HEADER_REVISION_MASK 0x1F +#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F +#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F + +/** + * struct dp_sdp - DP secondary data packet + * @sdp_header: DP secondary data packet header + * @db: DP secondaray data packet data blocks + * VSC SDP Payload for PSR + * db[0]: Stereo Interface + * db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid + * db[2]: CRC value bits 7:0 of the R or Cr component + * db[3]: CRC value bits 15:8 of the R or Cr component + * db[4]: CRC value bits 7:0 of the G or Y component + * db[5]: CRC value bits 15:8 of the G or Y component + * db[6]: CRC value bits 7:0 of the B or Cb component + * db[7]: CRC value bits 15:8 of 
the B or Cb component + * db[8] - db[31]: Reserved + * VSC SDP Payload for Pixel Encoding/Colorimetry Format + * db[0] - db[15]: Reserved + * db[16]: Pixel Encoding and Colorimetry Formats + * db[17]: Dynamic Range and Component Bit Depth + * db[18]: Content Type + * db[19] - db[31]: Reserved + */ +struct dp_sdp { + struct dp_sdp_header sdp_header; + u8 db[32]; +} __packed; + +#define EDP_VSC_PSR_STATE_ACTIVE (1<<0) +#define EDP_VSC_PSR_UPDATE_RFB (1<<1) +#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) + +/** + * enum dp_pixelformat - drm DP Pixel encoding formats + * + * This enum is used to indicate DP VSC SDP Pixel encoding formats. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * + * @DP_PIXELFORMAT_RGB: RGB pixel encoding format + * @DP_PIXELFORMAT_YUV444: YCbCr 4:4:4 pixel encoding format + * @DP_PIXELFORMAT_YUV422: YCbCr 4:2:2 pixel encoding format + * @DP_PIXELFORMAT_YUV420: YCbCr 4:2:0 pixel encoding format + * @DP_PIXELFORMAT_Y_ONLY: Y Only pixel encoding format + * @DP_PIXELFORMAT_RAW: RAW pixel encoding format + * @DP_PIXELFORMAT_RESERVED: Reserved pixel encoding format + */ +enum dp_pixelformat { + DP_PIXELFORMAT_RGB = 0, + DP_PIXELFORMAT_YUV444 = 0x1, + DP_PIXELFORMAT_YUV422 = 0x2, + DP_PIXELFORMAT_YUV420 = 0x3, + DP_PIXELFORMAT_Y_ONLY = 0x4, + DP_PIXELFORMAT_RAW = 0x5, + DP_PIXELFORMAT_RESERVED = 0x6, +}; + +/** + * enum dp_colorimetry - drm DP Colorimetry formats + * + * This enum is used to indicate DP VSC SDP Colorimetry formats. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition. + * + * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or + * ITU-R BT.601 colorimetry format + * @DP_COLORIMETRY_RGB_WIDE_FIXED: RGB wide gamut fixed point colorimetry format + * @DP_COLORIMETRY_BT709_YCC: ITU-R BT.709 colorimetry format + * @DP_COLORIMETRY_RGB_WIDE_FLOAT: RGB wide gamut floating point + * (scRGB (IEC 61966-2-2)) colorimetry format + * @DP_COLORIMETRY_XVYCC_601: xvYCC601 colorimetry format + * @DP_COLORIMETRY_OPRGB: OpRGB colorimetry format + * @DP_COLORIMETRY_XVYCC_709: xvYCC709 colorimetry format + * @DP_COLORIMETRY_DCI_P3_RGB: DCI-P3 (SMPTE RP 431-2) colorimetry format + * @DP_COLORIMETRY_SYCC_601: sYCC601 colorimetry format + * @DP_COLORIMETRY_RGB_CUSTOM: RGB Custom Color Profile colorimetry format + * @DP_COLORIMETRY_OPYCC_601: opYCC601 colorimetry format + * @DP_COLORIMETRY_BT2020_RGB: ITU-R BT.2020 R' G' B' colorimetry format + * @DP_COLORIMETRY_BT2020_CYCC: ITU-R BT.2020 Y'c C'bc C'rc colorimetry format + * @DP_COLORIMETRY_BT2020_YCC: ITU-R BT.2020 Y' C'b C'r colorimetry format + */ +enum dp_colorimetry { + DP_COLORIMETRY_DEFAULT = 0, + DP_COLORIMETRY_RGB_WIDE_FIXED = 0x1, + DP_COLORIMETRY_BT709_YCC = 0x1, + DP_COLORIMETRY_RGB_WIDE_FLOAT = 0x2, + DP_COLORIMETRY_XVYCC_601 = 0x2, + DP_COLORIMETRY_OPRGB = 0x3, + DP_COLORIMETRY_XVYCC_709 = 0x3, + DP_COLORIMETRY_DCI_P3_RGB = 0x4, + DP_COLORIMETRY_SYCC_601 = 0x4, + DP_COLORIMETRY_RGB_CUSTOM = 0x5, + DP_COLORIMETRY_OPYCC_601 = 0x5, + DP_COLORIMETRY_BT2020_RGB = 0x6, + DP_COLORIMETRY_BT2020_CYCC = 0x6, + DP_COLORIMETRY_BT2020_YCC = 0x7, +}; + +/** + * enum dp_dynamic_range - drm DP Dynamic Range + * + * This enum is used to indicate DP VSC SDP Dynamic Range. 
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * + * @DP_DYNAMIC_RANGE_VESA: VESA range + * @DP_DYNAMIC_RANGE_CTA: CTA range + */ +enum dp_dynamic_range { + DP_DYNAMIC_RANGE_VESA = 0, + DP_DYNAMIC_RANGE_CTA = 1, +}; + +/** + * enum dp_content_type - drm DP Content Type + * + * This enum is used to indicate DP VSC SDP Content Types. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * CTA-861-G defines content types and expected processing by a sink device + * + * @DP_CONTENT_TYPE_NOT_DEFINED: Not defined type + * @DP_CONTENT_TYPE_GRAPHICS: Graphics type + * @DP_CONTENT_TYPE_PHOTO: Photo type + * @DP_CONTENT_TYPE_VIDEO: Video type + * @DP_CONTENT_TYPE_GAME: Game type + */ +enum dp_content_type { + DP_CONTENT_TYPE_NOT_DEFINED = 0x00, + DP_CONTENT_TYPE_GRAPHICS = 0x01, + DP_CONTENT_TYPE_PHOTO = 0x02, + DP_CONTENT_TYPE_VIDEO = 0x03, + DP_CONTENT_TYPE_GAME = 0x04, +}; + +/** + * struct drm_dp_vsc_sdp - drm DP VSC SDP + * + * This structure represents a DP VSC SDP of drm + * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and + * [Table 2-117: VSC SDP Payload for DB16 through DB18] + * + * @sdp_type: secondary-data packet type + * @revision: revision number + * @length: number of valid data bytes + * @pixelformat: pixel encoding format + * @colorimetry: colorimetry format + * @bpc: bit per color + * @dynamic_range: dynamic range information + * @content_type: CTA-861-G defines content types and expected processing by a sink device + */ +struct drm_dp_vsc_sdp { + unsigned char sdp_type; + unsigned char revision; + unsigned char length; + enum dp_pixelformat pixelformat; + enum dp_colorimetry colorimetry; + int bpc; + enum dp_dynamic_range dynamic_range; + enum dp_content_type content_type; +}; + +void drm_dp_vsc_sdp_log(const char *level, struct device *dev, + const struct drm_dp_vsc_sdp *vsc); + +int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); + +static inline int +drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); +} + +static inline u8 +drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; +} + +static inline bool +drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 && + (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); +} + +static inline bool +drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 && + (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); +} + +static inline bool +drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x12 && + dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; +} + +static inline bool +drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x14 && + dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED; +} + +static inline u8 +drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return (dpcd[DP_DPCD_REV] >= 0x14) ? 
DP_TRAINING_PATTERN_MASK_1_4 : + DP_TRAINING_PATTERN_MASK; +} + +static inline bool +drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; +} + +/* DP/eDP DSC support */ +u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + bool is_edp); +u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); +int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], + u8 dsc_bpc[3]); + +static inline bool +drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & + DP_DSC_DECOMPRESSION_IS_SUPPORTED; +} + +static inline u16 +drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | + (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & + DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK << + DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT); +} + +static inline u32 +drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + /* Max Slicewidth = Number of Pixels * 320 */ + return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * + DP_DSC_SLICE_WIDTH_MULTIPLIER; +} + +/* Forward Error Correction Support on DP 1.4 */ +static inline bool +drm_dp_sink_supports_fec(const u8 fec_capable) +{ + return fec_capable & DP_FEC_CAPABLE; +} + +static inline bool +drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B; +} + +static inline bool +drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_EDP_CONFIGURATION_CAP] & + DP_ALTERNATE_SCRAMBLER_RESET_CAP; +} + +/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */ +static inline bool +drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DOWN_STREAM_PORT_COUNT] & + DP_MSA_TIMING_PAR_IGNORED; +} + +/* + * DisplayPort AUX channel + */ + +/** + * struct drm_dp_aux_msg - DisplayPort AUX channel transaction + * @address: address of the (first) register to access + * @request: contains the type of transaction (see DP_AUX_* macros) + * @reply: upon completion, contains the reply type of the transaction + * @buffer: pointer to a transmission or reception buffer + * @size: size of @buffer + */ +struct drm_dp_aux_msg { + unsigned int address; + u8 request; + u8 reply; + void *buffer; + size_t size; +}; + +struct cec_adapter; +struct edid; +struct drm_connector; + +/** + * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX + * @lock: mutex protecting this struct + * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. 
+ * @connector: the connector this CEC adapter is associated with + * @unregister_work: unregister the CEC adapter + */ +struct drm_dp_aux_cec { + struct mutex lock; + struct cec_adapter *adap; + struct drm_connector *connector; + struct delayed_work unregister_work; +}; + +/** + * struct drm_dp_aux - DisplayPort AUX channel + * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter + * @ddc: I2C adapter that can be used for I2C-over-AUX communication + * @dev: pointer to struct device that is the parent for this AUX channel + * @crtc: backpointer to the crtc that is currently using this AUX channel + * @hw_mutex: internal mutex used for locking transfers + * @crc_work: worker that captures CRCs for each frame + * @crc_count: counter of captured frame CRCs + * @transfer: transfers a message representing a single AUX transaction + * + * The .dev field should be set to a pointer to the device that implements + * the AUX channel. + * + * The .name field may be used to specify the name of the I2C adapter. If set to + * NULL, dev_name() of .dev will be used. + * + * Drivers provide a hardware-specific implementation of how transactions + * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg + * structure describing the transaction is passed into this function. Upon + * success, the implementation should return the number of payload bytes + * that were transferred, or a negative error-code on failure. Helpers + * propagate errors from the .transfer() function, with the exception of + * the -EBUSY error, which causes a transaction to be retried. On a short, + * helpers will return -EPROTO to make it simpler to check for failure. + * + * An AUX channel can also be used to transport I2C messages to a sink. A + * typical application of that is to access an EDID that's present in the + * sink device. The .transfer() function can also be used to execute such + * transactions. The drm_dp_aux_register() function registers an I2C + * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers + * should call drm_dp_aux_unregister() to remove the I2C adapter. + * The I2C adapter uses long transfers by default; if a partial response is + * received, the adapter will drop down to the size given by the partial + * response for this transaction only. + * + * Note that the aux helper code assumes that the .transfer() function + * only modifies the reply field of the drm_dp_aux_msg structure. The + * retry logic and i2c helpers assume this is the case. + */ +struct drm_dp_aux { + const char *name; + struct i2c_adapter ddc; + struct device *dev; + struct drm_crtc *crtc; + struct mutex hw_mutex; + struct work_struct crc_work; + u8 crc_count; + ssize_t (*transfer)(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg); + /** + * @i2c_nack_count: Counts I2C NACKs, used for DP validation. + */ + unsigned i2c_nack_count; + /** + * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. + */ + unsigned i2c_defer_count; + /** + * @cec: struct containing fields used for CEC-Tunneling-over-AUX. + */ + struct drm_dp_aux_cec cec; + /** + * @is_remote: Is this AUX CH actually using sideband messaging. 
+ */ + bool is_remote; +}; + +ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); +ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size); + +/** + * drm_dp_dpcd_readb() - read a single byte from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to read + * @valuep: location where the value of the register will be stored + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, + unsigned int offset, u8 *valuep) +{ + return drm_dp_dpcd_read(aux, offset, valuep, 1); +} + +/** + * drm_dp_dpcd_writeb() - write a single byte to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to write + * @value: value to write to the register + * + * Returns the number of bytes transferred (1) on success, or a negative + * error code on failure. + */ +static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, + unsigned int offset, u8 value) +{ + return drm_dp_dpcd_write(aux, offset, &value, 1); +} + +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]); + +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, + u8 status[DP_LINK_STATUS_SIZE]); + +bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, + u8 real_edid_checksum); + +int drm_dp_read_downstream_info(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 type); +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid); +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); +void drm_dp_downstream_debug(struct seq_file *m, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid, + struct drm_dp_aux *aux); +enum drm_mode_subconnector +drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +void drm_dp_set_subconnector_property(struct drm_connector *connector, + enum drm_connector_status status, + const u8 *dpcd, + const u8 port_cap[4]); + +struct drm_dp_desc; +bool drm_dp_read_sink_count_cap(struct drm_connector *connector, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const struct drm_dp_desc *desc); +int drm_dp_read_sink_count(struct drm_dp_aux *aux); + +void drm_dp_remote_aux_init(struct drm_dp_aux *aux); +void drm_dp_aux_init(struct drm_dp_aux *aux); +int drm_dp_aux_register(struct drm_dp_aux *aux); +void drm_dp_aux_unregister(struct drm_dp_aux *aux); + 
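To make the struct drm_dp_aux contract documented above concrete, here is a minimal sketch of how a driver might register an AUX channel and perform a single DPCD read with the helpers declared in this file. It is an illustrative sketch, not part of the patch: the foo_* names are hypothetical placeholders and the transfer hook is a stub, while the drm_dp_* symbols and DP_DPCD_REV are taken from the header itself.

#include <linux/device.h>
#include <linux/kernel.h>
#include <drm/drm_dp_helper.h>

/* Hypothetical driver state; the "foo" names are illustrative only. */
struct foo_dp {
        struct device *dev;
        struct drm_dp_aux aux;
};

static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
                                struct drm_dp_aux_msg *msg)
{
        struct foo_dp *foo = container_of(aux, struct foo_dp, aux);

        /*
         * Real hardware access would go here: issue the AUX request described
         * by msg, fill in msg->reply, and return the number of payload bytes
         * transferred (or a negative error code). The helpers retry on -EBUSY
         * and turn short transfers into -EPROTO, as documented for
         * struct drm_dp_aux above.
         */
        dev_dbg(foo->dev, "AUX request 0x%x at 0x%05x, %zu bytes\n",
                msg->request, msg->address, msg->size);
        return -EIO;    /* stub: no hardware behind this sketch */
}

static int foo_aux_setup(struct foo_dp *foo)
{
        ssize_t len;
        u8 rev;
        int ret;

        foo->aux.name = "foo DP AUX";   /* optional; dev_name(dev) if NULL */
        foo->aux.dev = foo->dev;
        foo->aux.transfer = foo_aux_transfer;

        drm_dp_aux_init(&foo->aux);

        /*
         * Registering exposes the I2C-over-AUX adapter; pair with
         * drm_dp_aux_unregister() on teardown.
         */
        ret = drm_dp_aux_register(&foo->aux);
        if (ret)
                return ret;

        /* Single-byte DPCD read via the drm_dp_dpcd_readb() wrapper above. */
        len = drm_dp_dpcd_readb(&foo->aux, DP_DPCD_REV, &rev);
        if (len < 0)
                return len;

        dev_info(foo->dev, "DPCD revision: %#x\n", rev);
        return 0;
}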
+int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); +int drm_dp_stop_crc(struct drm_dp_aux *aux); + +struct drm_dp_dpcd_ident { + u8 oui[3]; + u8 device_id[6]; + u8 hw_rev; + u8 sw_major_rev; + u8 sw_minor_rev; +} __packed; + +/** + * struct drm_dp_desc - DP branch/sink device descriptor + * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). + * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. + */ +struct drm_dp_desc { + struct drm_dp_dpcd_ident ident; + u32 quirks; +}; + +int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, + bool is_branch); +u32 drm_dp_get_edid_quirks(const struct edid *edid); + +/** + * enum drm_dp_quirk - Display Port sink/branch device specific quirks + * + * Display Port sink and branch devices in the wild have a variety of bugs; try + * to collect them here. The quirks are shared, but it's up to the drivers to + * implement workarounds for them. Note that because some devices have + * unreliable OUIDs, the EDID of sinks should also be checked for quirks using + * drm_dp_get_edid_quirks(). + */ +enum drm_dp_quirk { + /** + * @DP_DPCD_QUIRK_CONSTANT_N: + * + * The device requires main link attributes Mvid and Nvid to be limited + * to 16 bits. A constant value (0x8000) is therefore used for compatibility. + */ + DP_DPCD_QUIRK_CONSTANT_N, + /** + * @DP_DPCD_QUIRK_NO_PSR: + * + * The device does not support PSR even if it reports that it does, or + * the driver still needs to implement proper handling for such a device. + */ + DP_DPCD_QUIRK_NO_PSR, + /** + * @DP_DPCD_QUIRK_NO_SINK_COUNT: + * + * The device does not set SINK_COUNT to a non-zero value. + * The driver should ignore SINK_COUNT during detection. Note that + * drm_dp_read_sink_count_cap() automatically checks for this quirk. + */ + DP_DPCD_QUIRK_NO_SINK_COUNT, + /** + * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD: + * + * The device supports MST DSC despite not supporting Virtual DPCD. + * The DSC caps can be read from the physical aux instead. + */ + DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD, + /** + * @DP_QUIRK_FORCE_DPCD_BACKLIGHT: + * + * The device is telling the truth when it says that it uses DPCD + * backlight controls, even if the system's firmware disagrees. This + * quirk should be checked against both the ident and panel EDID. + * When present, the driver should honor the DPCD backlight + * capabilities advertised. + */ + DP_QUIRK_FORCE_DPCD_BACKLIGHT, + /** + * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS: + * + * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite + * the DP_MAX_LINK_RATE register reporting a lower max multiplier. + */ + DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, +}; + +/** + * drm_dp_has_quirk() - does the DP device have a specific quirk + * @desc: Device descriptor filled by drm_dp_read_desc() + * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks() + * @quirk: Quirk to query for + * + * Return true if the DP device identified by @desc has @quirk. 
+ */ +static inline bool +drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks, + enum drm_dp_quirk quirk) +{ + return (desc->quirks | edid_quirks) & BIT(quirk); +} + +#ifdef CONFIG_DRM_DP_CEC +void drm_dp_cec_irq(struct drm_dp_aux *aux); +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, + struct drm_connector *connector); +void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); +void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); +#else +static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) +{ +} + +static inline void +drm_dp_cec_register_connector(struct drm_dp_aux *aux, + struct drm_connector *connector) +{ +} + +static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, + const struct edid *edid) +{ +} + +static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) +{ +} + +#endif + +/** + * struct drm_dp_phy_test_params - DP Phy Compliance parameters + * @link_rate: Requested Link rate from DPCD 0x219 + * @num_lanes: Number of lanes requested by the sink through DPCD 0x220 + * @phy_pattern: DP Phy test pattern from DPCD 0x248 + * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DPCD 0x24A and 0x24B + * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259 + * @enhanced_frame_cap: flag for enhanced frame capability. + */ +struct drm_dp_phy_test_params { + int link_rate; + u8 num_lanes; + u8 phy_pattern; + u8 hbr2_reset[2]; + u8 custom80[10]; + bool enhanced_frame_cap; +}; + +int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data); +int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, + struct drm_dp_phy_test_params *data, u8 dp_rev); +#endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h new file mode 100644 index 000000000..bd1c39907 --- /dev/null +++ b/include/drm/drm_dp_mst_helper.h @@ -0,0 +1,958 @@ +/* + * Copyright © 2014 Red Hat. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ +#ifndef _DRM_DP_MST_HELPER_H_ +#define _DRM_DP_MST_HELPER_H_ + +#include <linux/types.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_atomic.h> + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) +#include <linux/stackdepot.h> +#include <linux/timekeeping.h> + +enum drm_dp_mst_topology_ref_type { + DRM_DP_MST_TOPOLOGY_REF_GET, + DRM_DP_MST_TOPOLOGY_REF_PUT, +}; + +struct drm_dp_mst_topology_ref_history { + struct drm_dp_mst_topology_ref_entry { + enum drm_dp_mst_topology_ref_type type; + int count; + ktime_t ts_nsec; + depot_stack_handle_t backtrace; + } *entries; + int len; +}; +#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */ + +struct drm_dp_mst_branch; + +/** + * struct drm_dp_vcpi - Virtual Channel Payload Identifier + * @vcpi: Virtual channel ID. + * @pbn: Payload Bandwidth Number for this channel + * @aligned_pbn: PBN aligned with slot size + * @num_slots: number of slots for this PBN + */ +struct drm_dp_vcpi { + int vcpi; + int pbn; + int aligned_pbn; + int num_slots; +}; + +/** + * struct drm_dp_mst_port - MST port + * @port_num: port number + * @input: if this port is an input port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @mcs: message capability status - DP 1.2 spec. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @pdt: Peer Device Type. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @ldps: Legacy Device Plug Status. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @dpcd_rev: DPCD revision of device on this port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @num_sdp_streams: Number of simultaneous streams. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @num_sdp_stream_sinks: Number of stream sinks. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @full_pbn: Max possible bandwidth for this port. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @next: link to next port on this branch device + * @aux: i2c aux transport to talk to device connected to this port, protected + * by &drm_dp_mst_topology_mgr.base.lock. + * @parent: branch device parent of this port + * @vcpi: Virtual Channel Payload info for this port. + * @connector: DRM connector this port is connected to. Protected by + * &drm_dp_mst_topology_mgr.base.lock. + * @mgr: topology manager this port lives under. + * + * This structure represents an MST port endpoint on a device somewhere + * in the MST topology. + */ +struct drm_dp_mst_port { + /** + * @topology_kref: refcount for this port's lifetime in the topology, + * only the DP MST helpers should need to touch this + */ + struct kref topology_kref; + + /** + * @malloc_kref: refcount for the memory allocation containing this + * structure. See drm_dp_mst_get_port_malloc() and + * drm_dp_mst_put_port_malloc(). + */ + struct kref malloc_kref; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history: A history of each topology + * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. + */ + struct drm_dp_mst_topology_ref_history topology_ref_history; +#endif + + u8 port_num; + bool input; + bool mcs; + bool ddps; + u8 pdt; + bool ldps; + u8 dpcd_rev; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + uint16_t full_pbn; + struct list_head next; + /** + * @mstb: the branch device connected to this port, if there is one. + * This should be considered protected for reading by + * &drm_dp_mst_topology_mgr.lock. 
There are two exceptions to this: + * &drm_dp_mst_topology_mgr.up_req_work and + * &drm_dp_mst_topology_mgr.work, which do not grab + * &drm_dp_mst_topology_mgr.lock during reads but are the only + * updaters of this list and are protected from writing concurrently + * by &drm_dp_mst_topology_mgr.probe_lock. + */ + struct drm_dp_mst_branch *mstb; + struct drm_dp_aux aux; /* i2c bus for this port? */ + struct drm_dp_mst_branch *parent; + + struct drm_dp_vcpi vcpi; + struct drm_connector *connector; + struct drm_dp_mst_topology_mgr *mgr; + + /** + * @cached_edid: for DP logical ports - make tiling work by ensuring + * that the EDID for all connectors is read immediately. + */ + struct edid *cached_edid; + /** + * @has_audio: Tracks whether the sink connector to this port is + * audio-capable. + */ + bool has_audio; + + /** + * @fec_capable: bool indicating if FEC can be supported up to that + * point in the MST topology. + */ + bool fec_capable; +}; + +/* sideband msg header - not bit struct */ +struct drm_dp_sideband_msg_hdr { + u8 lct; + u8 lcr; + u8 rad[8]; + bool broadcast; + bool path_msg; + u8 msg_len; + bool somt; + bool eomt; + bool seqno; +}; + +struct drm_dp_sideband_msg_rx { + u8 chunk[48]; + u8 msg[256]; + u8 curchunk_len; + u8 curchunk_idx; /* chunk we are parsing now */ + u8 curchunk_hdrlen; + u8 curlen; /* total length of the msg */ + bool have_somt; + bool have_eomt; + struct drm_dp_sideband_msg_hdr initial_hdr; +}; + +/** + * struct drm_dp_mst_branch - MST branch device. + * @rad: Relative Address to talk to this branch device. + * @lct: Link count total to talk to this branch device. + * @num_ports: number of ports on the branch. + * @port_parent: pointer to the port parent, NULL if toplevel. + * @mgr: topology manager for this branch device. + * @link_address_sent: if a link address message has been sent to this device yet. + * @guid: guid for DP 1.2 branch device. port under this branch can be + * identified by port #. + * + * This structure represents an MST branch device, there is one + * primary branch device at the root, along with any other branches connected + * to downstream port of parent branches. + */ +struct drm_dp_mst_branch { + /** + * @topology_kref: refcount for this branch device's lifetime in the + * topology, only the DP MST helpers should need to touch this + */ + struct kref topology_kref; + + /** + * @malloc_kref: refcount for the memory allocation containing this + * structure. See drm_dp_mst_get_mstb_malloc() and + * drm_dp_mst_put_mstb_malloc(). + */ + struct kref malloc_kref; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history: A history of each topology + * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. + */ + struct drm_dp_mst_topology_ref_history topology_ref_history; +#endif + + /** + * @destroy_next: linked-list entry used by + * drm_dp_delayed_destroy_work() + */ + struct list_head destroy_next; + + u8 rad[8]; + u8 lct; + int num_ports; + + /** + * @ports: the list of ports on this branch device. This should be + * considered protected for reading by &drm_dp_mst_topology_mgr.lock. 
+ * There are two exceptions to this: + * &drm_dp_mst_topology_mgr.up_req_work and + * &drm_dp_mst_topology_mgr.work, which do not grab + * &drm_dp_mst_topology_mgr.lock during reads but are the only + * updaters of this list and are protected from updating the list + * concurrently by @drm_dp_mst_topology_mgr.probe_lock + */ + struct list_head ports; + + struct drm_dp_mst_port *port_parent; + struct drm_dp_mst_topology_mgr *mgr; + + bool link_address_sent; + + /* global unique identifier to identify branch devices */ + u8 guid[16]; +}; + + +struct drm_dp_nak_reply { + u8 guid[16]; + u8 reason; + u8 nak_data; +}; + +struct drm_dp_link_address_ack_reply { + u8 guid[16]; + u8 nports; + struct drm_dp_link_addr_reply_port { + bool input_port; + u8 peer_device_type; + u8 port_number; + bool mcs; + bool ddps; + bool legacy_device_plug_status; + u8 dpcd_revision; + u8 peer_guid[16]; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + } ports[16]; +}; + +struct drm_dp_remote_dpcd_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_dpcd_write_ack_reply { + u8 port_number; +}; + +struct drm_dp_remote_dpcd_write_nak_reply { + u8 port_number; + u8 reason; + u8 bytes_written_before_failure; +}; + +struct drm_dp_remote_i2c_read_ack_reply { + u8 port_number; + u8 num_bytes; + u8 bytes[255]; +}; + +struct drm_dp_remote_i2c_read_nak_reply { + u8 port_number; + u8 nak_reason; + u8 i2c_nak_transaction; +}; + +struct drm_dp_remote_i2c_write_ack_reply { + u8 port_number; +}; + +struct drm_dp_query_stream_enc_status_ack_reply { + /* Bit[23:16]- Stream Id */ + u8 stream_id; + + /* Bit[15]- Signed */ + bool reply_signed; + + /* Bit[10:8]- Stream Output Sink Type */ + bool unauthorizable_device_present; + bool legacy_device_present; + bool query_capable_device_present; + + /* Bit[12:11]- Stream Output CP Type */ + bool hdcp_1x_device_present; + bool hdcp_2x_device_present; + + /* Bit[4]- Stream Authentication */ + bool auth_completed; + + /* Bit[3]- Stream Encryption */ + bool encryption_enabled; + + /* Bit[2]- Stream Repeater Function Present */ + bool repeater_present; + + /* Bit[1:0]- Stream State */ + u8 state; +}; + +#define DRM_DP_MAX_SDP_STREAMS 16 +struct drm_dp_allocate_payload { + u8 port_number; + u8 number_sdp_streams; + u8 vcpi; + u16 pbn; + u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS]; +}; + +struct drm_dp_allocate_payload_ack_reply { + u8 port_number; + u8 vcpi; + u16 allocated_pbn; +}; + +struct drm_dp_connection_status_notify { + u8 guid[16]; + u8 port_number; + bool legacy_device_plug_status; + bool displayport_device_plug_status; + bool message_capability_status; + bool input_port; + u8 peer_device_type; +}; + +struct drm_dp_remote_dpcd_read { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; +}; + +struct drm_dp_remote_dpcd_write { + u8 port_number; + u32 dpcd_address; + u8 num_bytes; + u8 *bytes; +}; + +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 +struct drm_dp_remote_i2c_read { + u8 num_transactions; + u8 port_number; + struct drm_dp_remote_i2c_read_tx { + u8 i2c_dev_id; + u8 num_bytes; + u8 *bytes; + u8 no_stop_bit; + u8 i2c_transaction_delay; + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; + u8 read_i2c_device_id; + u8 num_bytes_read; +}; + +struct drm_dp_remote_i2c_write { + u8 port_number; + u8 write_i2c_device_id; + u8 num_bytes; + u8 *bytes; +}; + +struct drm_dp_query_stream_enc_status { + u8 stream_id; + u8 client_id[7]; /* 56-bit nonce */ + u8 stream_event; + bool valid_stream_event; + u8 stream_behavior; + u8 
valid_stream_behavior; +}; + +/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_req { + u8 port_number; +}; + +struct drm_dp_enum_path_resources_ack_reply { + u8 port_number; + bool fec_capable; + u16 full_payload_bw_number; + u16 avail_payload_bw_number; +}; + +/* covers POWER_DOWN_PHY, POWER_UP_PHY */ +struct drm_dp_port_number_rep { + u8 port_number; +}; + +struct drm_dp_query_payload { + u8 port_number; + u8 vcpi; +}; + +struct drm_dp_resource_status_notify { + u8 port_number; + u8 guid[16]; + u16 available_pbn; +}; + +struct drm_dp_query_payload_ack_reply { + u8 port_number; + u16 allocated_pbn; +}; + +struct drm_dp_sideband_msg_req_body { + u8 req_type; + union ack_req { + struct drm_dp_connection_status_notify conn_stat; + struct drm_dp_port_number_req port_num; + struct drm_dp_resource_status_notify resource_stat; + + struct drm_dp_query_payload query_payload; + struct drm_dp_allocate_payload allocate_payload; + + struct drm_dp_remote_dpcd_read dpcd_read; + struct drm_dp_remote_dpcd_write dpcd_write; + + struct drm_dp_remote_i2c_read i2c_read; + struct drm_dp_remote_i2c_write i2c_write; + + struct drm_dp_query_stream_enc_status enc_status; + } u; +}; + +struct drm_dp_sideband_msg_reply_body { + u8 reply_type; + u8 req_type; + union ack_replies { + struct drm_dp_nak_reply nak; + struct drm_dp_link_address_ack_reply link_addr; + struct drm_dp_port_number_rep port_number; + + struct drm_dp_enum_path_resources_ack_reply path_resources; + struct drm_dp_allocate_payload_ack_reply allocate_payload; + struct drm_dp_query_payload_ack_reply query_payload; + + struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack; + struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack; + struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack; + + struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; + struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; + struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; + + struct drm_dp_query_stream_enc_status_ack_reply enc_status; + } u; +}; + +/* msg is queued to be put into a slot */ +#define DRM_DP_SIDEBAND_TX_QUEUED 0 +/* msg has started transmitting on a slot - still on msgq */ +#define DRM_DP_SIDEBAND_TX_START_SEND 1 +/* msg has finished transmitting on a slot - removed from msgq only in slot */ +#define DRM_DP_SIDEBAND_TX_SENT 2 +/* msg has received a response - removed from slot */ +#define DRM_DP_SIDEBAND_TX_RX 3 +#define DRM_DP_SIDEBAND_TX_TIMEOUT 4 + +struct drm_dp_sideband_msg_tx { + u8 msg[256]; + u8 chunk[48]; + u8 cur_offset; + u8 cur_len; + struct drm_dp_mst_branch *dst; + struct list_head next; + int seqno; + int state; + bool path_msg; + struct drm_dp_sideband_msg_reply_body reply; +}; + +/* sideband msg handler */ +struct drm_dp_mst_topology_mgr; +struct drm_dp_mst_topology_cbs { + /* create a connector for a port */ + struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + /* + * Checks for any pending MST interrupts, passing them to MST core for + * processing, the same way an HPD IRQ pulse handler would do this. + * If provided MST core calls this callback from a poll-waiting loop + * when waiting for MST down message replies. The driver is expected + * to guard against a race between this callback and the driver's HPD + * IRQ pulse handler. 
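 *
 * For illustration only, such a hook might be wired up roughly as in the
 * sketch below (hypothetical "foo" driver names, not code from this
 * header)::
 *
 *     static void foo_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
 *     {
 *         struct foo_dp *dp = container_of(mgr, struct foo_dp, mst_mgr);
 *
 *         mutex_lock(&dp->hpd_lock);
 *         foo_dp_check_mst_status(dp);
 *         mutex_unlock(&dp->hpd_lock);
 *     }
 *
 * where foo_dp_check_mst_status() is the same routine the driver's HPD IRQ
 * pulse handler calls, and the lock is whatever the driver already uses to
 * serialize those two paths.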
+ */ + void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr); +}; + +#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) + +#define DP_PAYLOAD_LOCAL 1 +#define DP_PAYLOAD_REMOTE 2 +#define DP_PAYLOAD_DELETE_LOCAL 3 + +struct drm_dp_payload { + int payload_state; + int start_slot; + int num_slots; + int vcpi; +}; + +#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) + +struct drm_dp_vcpi_allocation { + struct drm_dp_mst_port *port; + int vcpi; + int pbn; + bool dsc_enabled; + struct list_head next; +}; + +struct drm_dp_mst_topology_state { + struct drm_private_state base; + struct list_head vcpis; + struct drm_dp_mst_topology_mgr *mgr; +}; + +#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) + +/** + * struct drm_dp_mst_topology_mgr - DisplayPort MST manager + * + * This struct represents the toplevel displayport MST topology manager. + * There should be one instance of this for every MST capable DP connector + * on the GPU. + */ +struct drm_dp_mst_topology_mgr { + /** + * @base: Base private object for atomic + */ + struct drm_private_obj base; + + /** + * @dev: device pointer for adding i2c devices etc. + */ + struct drm_device *dev; + /** + * @cbs: callbacks for connector addition and destruction. + */ + const struct drm_dp_mst_topology_cbs *cbs; + /** + * @max_dpcd_transaction_bytes: maximum number of bytes to read/write + * in one go. + */ + int max_dpcd_transaction_bytes; + /** + * @aux: AUX channel for the DP MST connector this topolgy mgr is + * controlling. + */ + struct drm_dp_aux *aux; + /** + * @max_payloads: maximum number of payloads the GPU can generate. + */ + int max_payloads; + /** + * @conn_base_id: DRM connector ID this mgr is connected to. Only used + * to build the MST connector path value. + */ + int conn_base_id; + + /** + * @up_req_recv: Message receiver state for up requests. + */ + struct drm_dp_sideband_msg_rx up_req_recv; + + /** + * @down_rep_recv: Message receiver state for replies to down + * requests. + */ + struct drm_dp_sideband_msg_rx down_rep_recv; + + /** + * @lock: protects @mst_state, @mst_primary, @dpcd, and + * @payload_id_table_cleared. + */ + struct mutex lock; + + /** + * @probe_lock: Prevents @work and @up_req_work, the only writers of + * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing + * while they update the topology. + */ + struct mutex probe_lock; + + /** + * @mst_state: If this manager is enabled for an MST capable port. False + * if no MST sink/branch devices is connected. + */ + bool mst_state : 1; + + /** + * @payload_id_table_cleared: Whether or not we've cleared the payload + * ID table for @mst_primary. Protected by @lock. + */ + bool payload_id_table_cleared : 1; + + /** + * @mst_primary: Pointer to the primary/first branch device. + */ + struct drm_dp_mst_branch *mst_primary; + + /** + * @dpcd: Cache of DPCD for primary port. + */ + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + /** + * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0. + */ + u8 sink_count; + /** + * @pbn_div: PBN to slots divisor. + */ + int pbn_div; + + /** + * @funcs: Atomic helper callbacks + */ + const struct drm_private_state_funcs *funcs; + + /** + * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state + */ + struct mutex qlock; + + /** + * @tx_msg_downq: List of pending down requests + */ + struct list_head tx_msg_downq; + + /** + * @payload_lock: Protect payload information. 
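 * Drivers are not expected to take this lock themselves; it is taken
 * internally by the payload helpers. A commit sequence, sketched very
 * loosely, might look like::
 *
 *     drm_dp_update_payload_part1(mgr);
 *     (enable the stream, then wait for the ACT handled bit,
 *      e.g. via drm_dp_check_act_status(mgr))
 *     drm_dp_update_payload_part2(mgr);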
+ */ + struct mutex payload_lock; + /** + * @proposed_vcpis: Array of pointers for the new VCPI allocation. The + * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of + * this array is determined by @max_payloads. + */ + struct drm_dp_vcpi **proposed_vcpis; + /** + * @payloads: Array of payloads. The size of this array is determined + * by @max_payloads. + */ + struct drm_dp_payload *payloads; + /** + * @payload_mask: Elements of @payloads actually in use. Since + * reallocation of active outputs isn't possible gaps can be created by + * disabling outputs out of order compared to how they've been enabled. + */ + unsigned long payload_mask; + /** + * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis. + */ + unsigned long vcpi_mask; + + /** + * @tx_waitq: Wait to queue stall for the tx worker. + */ + wait_queue_head_t tx_waitq; + /** + * @work: Probe work. + */ + struct work_struct work; + /** + * @tx_work: Sideband transmit worker. This can nest within the main + * @work worker for each transaction @work launches. + */ + struct work_struct tx_work; + + /** + * @destroy_port_list: List of to be destroyed connectors. + */ + struct list_head destroy_port_list; + /** + * @destroy_branch_device_list: List of to be destroyed branch + * devices. + */ + struct list_head destroy_branch_device_list; + /** + * @delayed_destroy_lock: Protects @destroy_port_list and + * @destroy_branch_device_list. + */ + struct mutex delayed_destroy_lock; + + /** + * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items. + * A dedicated WQ makes it possible to drain any requeued work items + * on it. + */ + struct workqueue_struct *delayed_destroy_wq; + + /** + * @delayed_destroy_work: Work item to destroy MST port and branch + * devices, needed to avoid locking inversion. + */ + struct work_struct delayed_destroy_work; + + /** + * @up_req_list: List of pending up requests from the topology that + * need to be processed, in chronological order. + */ + struct list_head up_req_list; + /** + * @up_req_lock: Protects @up_req_list + */ + struct mutex up_req_lock; + /** + * @up_req_work: Work item to process up requests received from the + * topology. Needed to avoid blocking hotplug handling and sideband + * transmissions. + */ + struct work_struct up_req_work; + +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + /** + * @topology_ref_history_lock: protects + * &drm_dp_mst_port.topology_ref_history and + * &drm_dp_mst_branch.topology_ref_history. 
+ */ + struct mutex topology_ref_history_lock; +#endif +}; + +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, + struct drm_device *dev, struct drm_dp_aux *aux, + int max_dpcd_transaction_bytes, + int max_payloads, int conn_base_id); + +void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); + +bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); + +int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); + + +int +drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + +int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count); + +int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); + +bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn, int slots); + +int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + + +void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + + +void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + + +int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, + int pbn); + + +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr); + + +int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr); + +int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_dump_topology(struct seq_file *m, + struct drm_dp_mst_topology_mgr *mgr); + +void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, + bool sync); + +ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size); +ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size); + +int drm_dp_mst_connector_late_register(struct drm_connector *connector, + struct drm_dp_mst_port *port); +void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, + struct drm_dp_mst_port *port); + +struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, int pbn, + int pbn_div); +int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, + struct drm_dp_mst_port *port, + int pbn, int pbn_div, + bool enable); +int __must_check +drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +int __must_check +drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, bool power_up); +int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_query_stream_enc_status_ack_reply *status); +int __must_check 
drm_dp_mst_atomic_check(struct drm_atomic_state *state); + +void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); +void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); + +struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port); + +extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; + +/** + * __drm_dp_mst_state_iter_get - private atomic state iterator function for + * macro-internal use + * @state: &struct drm_atomic_state pointer + * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state + * iteration cursor + * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state + * iteration cursor + * @i: int iteration cursor, for macro-internal use + * + * Used by for_each_oldnew_mst_mgr_in_state(), + * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't + * call this directly. + * + * Returns: + * True if the current &struct drm_private_obj is a &struct + * drm_dp_mst_topology_mgr, false otherwise. + */ +static inline bool +__drm_dp_mst_state_iter_get(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr **mgr, + struct drm_dp_mst_topology_state **old_state, + struct drm_dp_mst_topology_state **new_state, + int i) +{ + struct __drm_private_objs_state *objs_state = &state->private_objs[i]; + + if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs) + return false; + + *mgr = to_dp_mst_topology_mgr(objs_state->ptr); + if (old_state) + *old_state = to_dp_mst_topology_state(objs_state->old_state); + if (new_state) + *new_state = to_dp_mst_topology_state(objs_state->new_state); + + return true; +} + +/** + * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology + * managers in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old + * state + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking both old and new state. This is useful in places where the state + * delta needs to be considered, for example in atomic check functions. + */ +#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i))) + +/** + * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers + * in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking only the old state. This is useful in disable functions, where we + * need the old state the hardware is still in. 
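 *
 * A rough usage sketch (hypothetical driver code, illustrative only)::
 *
 *     struct drm_dp_mst_topology_mgr *mgr;
 *     struct drm_dp_mst_topology_state *old_mst_state;
 *     struct drm_dp_vcpi_allocation *pos;
 *     int i;
 *
 *     for_each_old_mst_mgr_in_state(state, mgr, old_mst_state, i) {
 *         list_for_each_entry(pos, &old_mst_state->vcpis, next) {
 *             (tear down whatever was programmed for pos->port)
 *         }
 *     }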
+ */ +#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i))) + +/** + * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers + * in an atomic update + * @__state: &struct drm_atomic_state pointer + * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor + * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new + * state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all DRM DP MST topology managers in an atomic update, + * tracking only the new state. This is useful in enable functions, where we + * need the new state the hardware should be in when the atomic commit + * operation has completed. + */ +#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \ + for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ + for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i))) + +#endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h new file mode 100644 index 000000000..e57d0440f --- /dev/null +++ b/include/drm/drm_drv.h @@ -0,0 +1,714 @@ +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * Copyright 2016 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_DRV_H_ +#define _DRM_DRV_H_ + +#include <linux/list.h> +#include <linux/irqreturn.h> + +#include <drm/drm_device.h> + +struct drm_file; +struct drm_gem_object; +struct drm_master; +struct drm_minor; +struct dma_buf_attachment; +struct drm_display_mode; +struct drm_mode_create_dumb; +struct drm_printer; + +/** + * enum drm_driver_feature - feature flags + * + * See &drm_driver.driver_features, drm_device.driver_features and + * drm_core_check_feature(). + */ +enum drm_driver_feature { + /** + * @DRIVER_GEM: + * + * Driver use the GEM memory manager. This should be set for all modern + * drivers. + */ + DRIVER_GEM = BIT(0), + /** + * @DRIVER_MODESET: + * + * Driver supports mode setting interfaces (KMS). + */ + DRIVER_MODESET = BIT(1), + /** + * @DRIVER_RENDER: + * + * Driver supports dedicated render nodes. See also the :ref:`section on + * render nodes <drm_render_node>` for details. 
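 *
 * Like the other feature bits this is advertised through
 * &drm_driver.driver_features, for example (illustrative only)::
 *
 *     .driver_features = DRIVER_GEM | DRIVER_MODESET |
 *                        DRIVER_ATOMIC | DRIVER_RENDER,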
+ */ + DRIVER_RENDER = BIT(3), + /** + * @DRIVER_ATOMIC: + * + * Driver supports the full atomic modesetting userspace API. Drivers + * which only use atomic internally, but do not the support the full + * userspace API (e.g. not all properties converted to atomic, or + * multi-plane updates are not guaranteed to be tear-free) should not + * set this flag. + */ + DRIVER_ATOMIC = BIT(4), + /** + * @DRIVER_SYNCOBJ: + * + * Driver supports &drm_syncobj for explicit synchronization of command + * submission. + */ + DRIVER_SYNCOBJ = BIT(5), + /** + * @DRIVER_SYNCOBJ_TIMELINE: + * + * Driver supports the timeline flavor of &drm_syncobj for explicit + * synchronization of command submission. + */ + DRIVER_SYNCOBJ_TIMELINE = BIT(6), + + /* IMPORTANT: Below are all the legacy flags, add new ones above. */ + + /** + * @DRIVER_USE_AGP: + * + * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage + * AGP resources. New drivers don't need this. + */ + DRIVER_USE_AGP = BIT(25), + /** + * @DRIVER_LEGACY: + * + * Denote a legacy driver using shadow attach. Do not use. + */ + DRIVER_LEGACY = BIT(26), + /** + * @DRIVER_PCI_DMA: + * + * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace + * will be enabled. Only for legacy drivers. Do not use. + */ + DRIVER_PCI_DMA = BIT(27), + /** + * @DRIVER_SG: + * + * Driver can perform scatter/gather DMA, allocation and mapping of + * scatter/gather buffers will be enabled. Only for legacy drivers. Do + * not use. + */ + DRIVER_SG = BIT(28), + + /** + * @DRIVER_HAVE_DMA: + * + * Driver supports DMA, the userspace DMA API will be supported. Only + * for legacy drivers. Do not use. + */ + DRIVER_HAVE_DMA = BIT(29), + /** + * @DRIVER_HAVE_IRQ: + * + * Legacy irq support. Only for legacy drivers. Do not use. + * + * New drivers can either use the drm_irq_install() and + * drm_irq_uninstall() helper functions, or roll their own irq support + * code by calling request_irq() directly. + */ + DRIVER_HAVE_IRQ = BIT(30), + /** + * @DRIVER_KMS_LEGACY_CONTEXT: + * + * Used only by nouveau for backwards compatibility with existing + * userspace. Do not use. + */ + DRIVER_KMS_LEGACY_CONTEXT = BIT(31), +}; + +/** + * struct drm_driver - DRM driver structure + * + * This structure represent the common code for a family of cards. There will be + * one &struct drm_device for each card present in this family. It contains lots + * of vfunc entries, and a pile of those probably should be moved to more + * appropriate places like &drm_mode_config_funcs or into a new operations + * structure for GEM drivers. + */ +struct drm_driver { + /** + * @load: + * + * Backward-compatible driver callback to complete initialization steps + * after the driver is registered. For this reason, may suffer from + * race conditions and its use is deprecated for new drivers. It is + * therefore only supported for existing drivers not yet converted to + * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for + * proper and race-free way to set up a &struct drm_device. + * + * This is deprecated, do not use! + * + * Returns: + * + * Zero on success, non-zero value on failure. + */ + int (*load) (struct drm_device *, unsigned long flags); + + /** + * @open: + * + * Driver callback when a new &struct drm_file is opened. Useful for + * setting up driver-private data structures like buffer allocators, + * execution contexts or similar things. Such driver-private resources + * must be released again in @postclose. 
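 *
 * A minimal pairing of the two hooks might look roughly like this
 * (hypothetical "foo" driver, sketch only)::
 *
 *     static int foo_open(struct drm_device *dev, struct drm_file *file)
 *     {
 *         struct foo_file_priv *fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
 *
 *         if (!fpriv)
 *             return -ENOMEM;
 *
 *         file->driver_priv = fpriv;
 *         return 0;
 *     }
 *
 *     static void foo_postclose(struct drm_device *dev, struct drm_file *file)
 *     {
 *         kfree(file->driver_priv);
 *     }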
+ * + * Since the display/modeset side of DRM can only be owned by exactly + * one &struct drm_file (see &drm_file.is_master and &drm_device.master) + * there should never be a need to set up any modeset related resources + * in this callback. Doing so would be a driver design bug. + * + * Returns: + * + * 0 on success, a negative error code on failure, which will be + * promoted to userspace as the result of the open() system call. + */ + int (*open) (struct drm_device *, struct drm_file *); + + /** + * @postclose: + * + * One of the driver callbacks when a new &struct drm_file is closed. + * Useful for tearing down driver-private data structures allocated in + * @open like buffer allocators, execution contexts or similar things. + * + * Since the display/modeset side of DRM can only be owned by exactly + * one &struct drm_file (see &drm_file.is_master and &drm_device.master) + * there should never be a need to tear down any modeset related + * resources in this callback. Doing so would be a driver design bug. + */ + void (*postclose) (struct drm_device *, struct drm_file *); + + /** + * @lastclose: + * + * Called when the last &struct drm_file has been closed and there's + * currently no userspace client for the &struct drm_device. + * + * Modern drivers should only use this to force-restore the fbdev + * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked(). + * Anything else would indicate there's something seriously wrong. + * Modern drivers can also use this to execute delayed power switching + * state changes, e.g. in conjunction with the :ref:`vga_switcheroo` + * infrastructure. + * + * This is called after @postclose hook has been called. + * + * NOTE: + * + * All legacy drivers use this callback to de-initialize the hardware. + * This is purely because of the shadow-attach model, where the DRM + * kernel driver does not really own the hardware. Instead ownershipe is + * handled with the help of userspace through an inheritedly racy dance + * to set/unset the VT into raw mode. + * + * Legacy drivers initialize the hardware in the @firstopen callback, + * which isn't even called for modern drivers. + */ + void (*lastclose) (struct drm_device *); + + /** + * @unload: + * + * Reverse the effects of the driver load callback. Ideally, + * the clean up performed by the driver should happen in the + * reverse order of the initialization. Similarly to the load + * hook, this handler is deprecated and its usage should be + * dropped in favor of an open-coded teardown function at the + * driver layer. See drm_dev_unregister() and drm_dev_put() + * for the proper way to remove a &struct drm_device. + * + * The unload() hook is called right after unregistering + * the device. + * + */ + void (*unload) (struct drm_device *); + + /** + * @release: + * + * Optional callback for destroying device data after the final + * reference is released, i.e. the device is being destroyed. + * + * This is deprecated, clean up all memory allocations associated with a + * &drm_device using drmm_add_action(), drmm_kmalloc() and related + * managed resources functions. + */ + void (*release) (struct drm_device *); + + /** + * @irq_handler: + * + * Interrupt handler called when using drm_irq_install(). Not used by + * drivers which implement their own interrupt handling. + */ + irqreturn_t(*irq_handler) (int irq, void *arg); + + /** + * @irq_preinstall: + * + * Optional callback used by drm_irq_install() which is called before + * the interrupt handler is registered. 
This should be used to clear out + * any pending interrupts (from e.g. firmware based drives) and reset + * the interrupt handling registers. + */ + void (*irq_preinstall) (struct drm_device *dev); + + /** + * @irq_postinstall: + * + * Optional callback used by drm_irq_install() which is called after + * the interrupt handler is registered. This should be used to enable + * interrupt generation in the hardware. + */ + int (*irq_postinstall) (struct drm_device *dev); + + /** + * @irq_uninstall: + * + * Optional callback used by drm_irq_uninstall() which is called before + * the interrupt handler is unregistered. This should be used to disable + * interrupt generation in the hardware. + */ + void (*irq_uninstall) (struct drm_device *dev); + + /** + * @master_set: + * + * Called whenever the minor master is set. Only used by vmwgfx. + */ + void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, + bool from_open); + /** + * @master_drop: + * + * Called whenever the minor master is dropped. Only used by vmwgfx. + */ + void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); + + /** + * @debugfs_init: + * + * Allows drivers to create driver-specific debugfs files. + */ + void (*debugfs_init)(struct drm_minor *minor); + + /** + * @gem_free_object_unlocked: deconstructor for drm_gem_objects + * + * This is deprecated and should not be used by new drivers. Use + * &drm_gem_object_funcs.free instead. + */ + void (*gem_free_object_unlocked) (struct drm_gem_object *obj); + + /** + * @gem_open_object: + * + * This callback is deprecated in favour of &drm_gem_object_funcs.open. + * + * Driver hook called upon gem handle creation + */ + int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); + + /** + * @gem_close_object: + * + * This callback is deprecated in favour of &drm_gem_object_funcs.close. + * + * Driver hook called upon gem handle release + */ + void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); + + /** + * @gem_create_object: constructor for gem objects + * + * Hook for allocating the GEM object struct, for use by the CMA and + * SHMEM GEM helpers. + */ + struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, + size_t size); + /** + * @prime_handle_to_fd: + * + * Main PRIME export function. Should be implemented with + * drm_gem_prime_handle_to_fd() for GEM based drivers. + * + * For an in-depth discussion see :ref:`PRIME buffer sharing + * documentation <prime_buffer_sharing>`. + */ + int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, + uint32_t handle, uint32_t flags, int *prime_fd); + /** + * @prime_fd_to_handle: + * + * Main PRIME import function. Should be implemented with + * drm_gem_prime_fd_to_handle() for GEM based drivers. + * + * For an in-depth discussion see :ref:`PRIME buffer sharing + * documentation <prime_buffer_sharing>`. + */ + int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, + int prime_fd, uint32_t *handle); + /** + * @gem_prime_export: + * + * Export hook for GEM drivers. Deprecated in favour of + * &drm_gem_object_funcs.export. + */ + struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj, + int flags); + /** + * @gem_prime_import: + * + * Import hook for GEM drivers. + * + * This defaults to drm_gem_prime_import() if not set. + */ + struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, + struct dma_buf *dma_buf); + + /** + * @gem_prime_pin: + * + * Deprecated hook in favour of &drm_gem_object_funcs.pin. 
+ */ + int (*gem_prime_pin)(struct drm_gem_object *obj); + + /** + * @gem_prime_unpin: + * + * Deprecated hook in favour of &drm_gem_object_funcs.unpin. + */ + void (*gem_prime_unpin)(struct drm_gem_object *obj); + + + /** + * @gem_prime_get_sg_table: + * + * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table. + */ + struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); + + /** + * @gem_prime_import_sg_table: + * + * Optional hook used by the PRIME helper functions + * drm_gem_prime_import() respectively drm_gem_prime_import_dev(). + */ + struct drm_gem_object *(*gem_prime_import_sg_table)( + struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + /** + * @gem_prime_vmap: + * + * Deprecated vmap hook for GEM drivers. Please use + * &drm_gem_object_funcs.vmap instead. + */ + void *(*gem_prime_vmap)(struct drm_gem_object *obj); + + /** + * @gem_prime_vunmap: + * + * Deprecated vunmap hook for GEM drivers. Please use + * &drm_gem_object_funcs.vunmap instead. + */ + void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); + + /** + * @gem_prime_mmap: + * + * mmap hook for GEM drivers, used to implement dma-buf mmap in the + * PRIME helpers. + * + * FIXME: There's way too much duplication going on here, and also moved + * to &drm_gem_object_funcs. + */ + int (*gem_prime_mmap)(struct drm_gem_object *obj, + struct vm_area_struct *vma); + + /** + * @dumb_create: + * + * This creates a new dumb buffer in the driver's backing storage manager (GEM, + * TTM or something else entirely) and returns the resulting buffer handle. This + * handle can then be wrapped up into a framebuffer modeset object. + * + * Note that userspace is not allowed to use such objects for render + * acceleration - drivers must create their own private ioctls for such a use + * case. + * + * Width, height and depth are specified in the &drm_mode_create_dumb + * argument. The callback needs to fill the handle, pitch and size for + * the created buffer. + * + * Called by the user via ioctl. + * + * Returns: + * + * Zero on success, negative errno on failure. + */ + int (*dumb_create)(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + /** + * @dumb_map_offset: + * + * Allocate an offset in the drm device node's address space to be able to + * memory map a dumb buffer. + * + * The default implementation is drm_gem_create_mmap_offset(). GEM based + * drivers must not overwrite this. + * + * Called by the user via ioctl. + * + * Returns: + * + * Zero on success, negative errno on failure. + */ + int (*dumb_map_offset)(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); + /** + * @dumb_destroy: + * + * This destroys the userspace handle for the given dumb backing storage buffer. + * Since buffer objects must be reference counted in the kernel a buffer object + * won't be immediately freed if a framebuffer modeset object still uses it. + * + * Called by the user via ioctl. + * + * The default implementation is drm_gem_dumb_destroy(). GEM based drivers + * must not overwrite this. + * + * Returns: + * + * Zero on success, negative errno on failure. + */ + int (*dumb_destroy)(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle); + + /** + * @gem_vm_ops: Driver private ops for this object + * + * For GEM drivers this is deprecated in favour of + * &drm_gem_object_funcs.vm_ops. 
+ */ + const struct vm_operations_struct *gem_vm_ops; + + /** @major: driver major number */ + int major; + /** @minor: driver minor number */ + int minor; + /** @patchlevel: driver patch level */ + int patchlevel; + /** @name: driver name */ + char *name; + /** @desc: driver description */ + char *desc; + /** @date: driver date */ + char *date; + + /** + * @driver_features: + * Driver features, see &enum drm_driver_feature. Drivers can disable + * some features on a per-instance basis using + * &drm_device.driver_features. + */ + u32 driver_features; + + /** + * @ioctls: + * + * Array of driver-private IOCTL description entries. See the chapter on + * :ref:`IOCTL support in the userland interfaces + * chapter<drm_driver_ioctl>` for the full details. + */ + + const struct drm_ioctl_desc *ioctls; + /** @num_ioctls: Number of entries in @ioctls. */ + int num_ioctls; + + /** + * @fops: + * + * File operations for the DRM device node. See the discussion in + * :ref:`file operations<drm_driver_fops>` for in-depth coverage and + * some examples. + */ + const struct file_operations *fops; + + /* Everything below here is for legacy driver, never use! */ + /* private: */ + + /* List of devices hanging off this driver with stealth attach. */ + struct list_head legacy_dev_list; + int (*firstopen) (struct drm_device *); + void (*preclose) (struct drm_device *, struct drm_file *file_priv); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); + int (*dma_quiescent) (struct drm_device *); + int (*context_dtor) (struct drm_device *dev, int context); + u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); + int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); + void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); + int dev_priv_size; +}; + +void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, + size_t size, size_t offset); + +/** + * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance + * @parent: Parent device object + * @driver: DRM driver + * @type: the type of the struct which contains struct &drm_device + * @member: the name of the &drm_device within @type. + * + * This allocates and initialize a new DRM device. No device registration is done. + * Call drm_dev_register() to advertice the device to user space and register it + * with other core subsystems. This should be done last in the device + * initialization sequence to make sure userspace can't access an inconsistent + * state. + * + * The initial ref-count of the object is 1. Use drm_dev_get() and + * drm_dev_put() to take and drop further ref-counts. + * + * It is recommended that drivers embed &struct drm_device into their own device + * structure. + * + * Note that this manages the lifetime of the resulting &drm_device + * automatically using devres. The DRM device initialized with this function is + * automatically put on driver detach using drm_dev_put(). + * + * RETURNS: + * Pointer to new DRM device, or ERR_PTR on failure. 
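 *
 * For illustration, a platform driver embedding &struct drm_device could use
 * it roughly as follows (hypothetical "foo" driver, sketch only)::
 *
 *     struct foo_device {
 *         struct drm_device drm;
 *         ...
 *     };
 *
 *     static int foo_probe(struct platform_device *pdev)
 *     {
 *         struct foo_device *foo;
 *
 *         foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
 *                                  struct foo_device, drm);
 *         if (IS_ERR(foo))
 *             return PTR_ERR(foo);
 *
 *         (hardware and mode setting setup goes here)
 *
 *         return drm_dev_register(&foo->drm, 0);
 *     }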
+ */ +#define devm_drm_dev_alloc(parent, driver, type, member) \ + ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \ + offsetof(type, member))) + +struct drm_device *drm_dev_alloc(struct drm_driver *driver, + struct device *parent); +int drm_dev_register(struct drm_device *dev, unsigned long flags); +void drm_dev_unregister(struct drm_device *dev); + +void drm_dev_get(struct drm_device *dev); +void drm_dev_put(struct drm_device *dev); +void drm_put_dev(struct drm_device *dev); +bool drm_dev_enter(struct drm_device *dev, int *idx); +void drm_dev_exit(int idx); +void drm_dev_unplug(struct drm_device *dev); + +/** + * drm_dev_is_unplugged - is a DRM device unplugged + * @dev: DRM device + * + * This function can be called to check whether a hotpluggable is unplugged. + * Unplugging itself is singalled through drm_dev_unplug(). If a device is + * unplugged, these two functions guarantee that any store before calling + * drm_dev_unplug() is visible to callers of this function after it completes + * + * WARNING: This function fundamentally races against drm_dev_unplug(). It is + * recommended that drivers instead use the underlying drm_dev_enter() and + * drm_dev_exit() function pairs. + */ +static inline bool drm_dev_is_unplugged(struct drm_device *dev) +{ + int idx; + + if (drm_dev_enter(dev, &idx)) { + drm_dev_exit(idx); + return false; + } + + return true; +} + +/** + * drm_core_check_all_features - check driver feature flags mask + * @dev: DRM device to check + * @features: feature flag(s) mask + * + * This checks @dev for driver features, see &drm_driver.driver_features, + * &drm_device.driver_features, and the various &enum drm_driver_feature flags. + * + * Returns true if all features in the @features mask are supported, false + * otherwise. + */ +static inline bool drm_core_check_all_features(const struct drm_device *dev, + u32 features) +{ + u32 supported = dev->driver->driver_features & dev->driver_features; + + return features && (supported & features) == features; +} + +/** + * drm_core_check_feature - check driver feature flags + * @dev: DRM device to check + * @feature: feature flag + * + * This checks @dev for driver features, see &drm_driver.driver_features, + * &drm_device.driver_features, and the various &enum drm_driver_feature flags. + * + * Returns true if the @feature is supported, false otherwise. + */ +static inline bool drm_core_check_feature(const struct drm_device *dev, + enum drm_driver_feature feature) +{ + return drm_core_check_all_features(dev, feature); +} + +/** + * drm_drv_uses_atomic_modeset - check if the driver implements + * atomic_commit() + * @dev: DRM device + * + * This check is useful if drivers do not have DRIVER_ATOMIC set but + * have atomic modesetting internally implemented. + */ +static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) +{ + return drm_core_check_feature(dev, DRIVER_ATOMIC) || + (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); +} + + +int drm_dev_set_unique(struct drm_device *dev, const char *name); + + +#endif diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h new file mode 100644 index 000000000..732f32740 --- /dev/null +++ b/include/drm/drm_dsc.h @@ -0,0 +1,609 @@ +/* SPDX-License-Identifier: MIT + * Copyright (C) 2018 Intel Corp. 
+ * + * Authors: + * Manasi Navare <manasi.d.navare@intel.com> + */ + +#ifndef DRM_DSC_H_ +#define DRM_DSC_H_ + +#include <drm/drm_dp_helper.h> + +/* VESA Display Stream Compression DSC 1.2 constants */ +#define DSC_NUM_BUF_RANGES 15 +#define DSC_MUX_WORD_SIZE_8_10_BPC 48 +#define DSC_MUX_WORD_SIZE_12_BPC 64 +#define DSC_RC_PIXELS_PER_GROUP 3 +#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095 +#define DSC_RANGE_BPG_OFFSET_MASK 0x3f + +/* DSC Rate Control Constants */ +#define DSC_RC_MODEL_SIZE_CONST 8192 +#define DSC_RC_EDGE_FACTOR_CONST 6 +#define DSC_RC_TGT_OFFSET_HI_CONST 3 +#define DSC_RC_TGT_OFFSET_LO_CONST 3 + +/* DSC PPS constants and macros */ +#define DSC_PPS_VERSION_MAJOR_SHIFT 4 +#define DSC_PPS_BPC_SHIFT 4 +#define DSC_PPS_MSB_SHIFT 8 +#define DSC_PPS_LSB_MASK (0xFF << 0) +#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8) +#define DSC_PPS_VBR_EN_SHIFT 2 +#define DSC_PPS_SIMPLE422_SHIFT 3 +#define DSC_PPS_CONVERT_RGB_SHIFT 4 +#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5 +#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8) +#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8) +#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4 +#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11 +#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6 +#define DSC_PPS_NATIVE_420_SHIFT 1 +#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16 +#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0 +#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13 + +/** + * struct drm_dsc_rc_range_parameters - DSC Rate Control range parameters + * + * This defines different rate control parameters used by the DSC engine + * to compress the frame. + */ +struct drm_dsc_rc_range_parameters { + /** + * @range_min_qp: Min Quantization Parameters allowed for this range + */ + u8 range_min_qp; + /** + * @range_max_qp: Max Quantization Parameters allowed for this range + */ + u8 range_max_qp; + /** + * @range_bpg_offset: + * Bits/group offset to apply to target for this group + */ + u8 range_bpg_offset; +}; + +/** + * struct drm_dsc_config - Parameters required to configure DSC + * + * Driver populates this structure with all the parameters required + * to configure the display stream compression on the source. 
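 *
 * A rough outline of how a driver might use it, with all specifics elided
 * (illustrative sketch only)::
 *
 *     struct drm_dsc_config dsc_cfg = {};
 *     struct drm_dsc_pps_infoframe pps;
 *     int ret;
 *
 *     (fill dsc_cfg with mode- and sink-derived parameters)
 *
 *     ret = drm_dsc_compute_rc_parameters(&dsc_cfg);
 *     if (ret)
 *         return ret;
 *
 *     drm_dsc_dp_pps_header_init(&pps.pps_header);
 *     drm_dsc_pps_payload_pack(&pps.pps_payload, &dsc_cfg);
 *     (send the packed PPS SDP to the sink)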
+ */ +struct drm_dsc_config { + /** + * @line_buf_depth: + * Bits per component for previous reconstructed line buffer + */ + u8 line_buf_depth; + /** + * @bits_per_component: Bits per component to code (8/10/12) + */ + u8 bits_per_component; + /** + * @convert_rgb: + * Flag to indicate if RGB - YCoCg conversion is needed + * True if RGB input, False if YCoCg input + */ + bool convert_rgb; + /** + * @slice_count: Number fo slices per line used by the DSC encoder + */ + u8 slice_count; + /** + * @slice_width: Width of each slice in pixels + */ + u16 slice_width; + /** + * @slice_height: Slice height in pixels + */ + u16 slice_height; + /** + * @simple_422: True if simple 4_2_2 mode is enabled else False + */ + bool simple_422; + /** + * @pic_width: Width of the input display frame in pixels + */ + u16 pic_width; + /** + * @pic_height: Vertical height of the input display frame + */ + u16 pic_height; + /** + * @rc_tgt_offset_high: + * Offset to bits/group used by RC to determine QP adjustment + */ + u8 rc_tgt_offset_high; + /** + * @rc_tgt_offset_low: + * Offset to bits/group used by RC to determine QP adjustment + */ + u8 rc_tgt_offset_low; + /** + * @bits_per_pixel: + * Target bits per pixel with 4 fractional bits, bits_per_pixel << 4 + */ + u16 bits_per_pixel; + /** + * @rc_edge_factor: + * Factor to determine if an edge is present based on the bits produced + */ + u8 rc_edge_factor; + /** + * @rc_quant_incr_limit1: + * Slow down incrementing once the range reaches this value + */ + u8 rc_quant_incr_limit1; + /** + * @rc_quant_incr_limit0: + * Slow down incrementing once the range reaches this value + */ + u8 rc_quant_incr_limit0; + /** + * @initial_xmit_delay: + * Number of pixels to delay the initial transmission + */ + u16 initial_xmit_delay; + /** + * @initial_dec_delay: + * Initial decoder delay, number of pixel times that the decoder + * accumulates data in its rate buffer before starting to decode + * and output pixels. + */ + u16 initial_dec_delay; + /** + * @block_pred_enable: + * True if block prediction is used to code any groups within the + * picture. False if BP not used + */ + bool block_pred_enable; + /** + * @first_line_bpg_offset: + * Number of additional bits allocated for each group on the first + * line of slice. + */ + u8 first_line_bpg_offset; + /** + * @initial_offset: Value to use for RC model offset at slice start + */ + u16 initial_offset; + /** + * @rc_buf_thresh: Thresholds defining each of the buffer ranges + */ + u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1]; + /** + * @rc_range_params: + * Parameters for each of the RC ranges defined in + * &struct drm_dsc_rc_range_parameters + */ + struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES]; + /** + * @rc_model_size: Total size of RC model + */ + u16 rc_model_size; + /** + * @flatness_min_qp: Minimum QP where flatness information is sent + */ + u8 flatness_min_qp; + /** + * @flatness_max_qp: Maximum QP where flatness information is sent + */ + u8 flatness_max_qp; + /** + * @initial_scale_value: Initial value for the scale factor + */ + u8 initial_scale_value; + /** + * @scale_decrement_interval: + * Specifies number of group times between decrementing the scale factor + * at beginning of a slice. + */ + u16 scale_decrement_interval; + /** + * @scale_increment_interval: + * Number of group times between incrementing the scale factor value + * used at the beginning of a slice. 
+ */ + u16 scale_increment_interval; + /** + * @nfl_bpg_offset: Non first line BPG offset to be used + */ + u16 nfl_bpg_offset; + /** + * @slice_bpg_offset: BPG offset used to enforce slice bit + */ + u16 slice_bpg_offset; + /** + * @final_offset: Final RC linear transformation offset value + */ + u16 final_offset; + /** + * @vbr_enable: True if VBR mode is enabled, false if disabled + */ + bool vbr_enable; + /** + * @mux_word_size: Mux word size (in bits) for SSM mode + */ + u8 mux_word_size; + /** + * @slice_chunk_size: + * The (max) size in bytes of the "chunks" that are used in slice + * multiplexing. + */ + u16 slice_chunk_size; + /** + * @rc_bits: Rate control buffer size in bits + */ + u16 rc_bits; + /** + * @dsc_version_minor: DSC minor version + */ + u8 dsc_version_minor; + /** + * @dsc_version_major: DSC major version + */ + u8 dsc_version_major; + /** + * @native_422: True if Native 4:2:2 supported, else false + */ + bool native_422; + /** + * @native_420: True if Native 4:2:0 supported else false. + */ + bool native_420; + /** + * @second_line_bpg_offset: + * Additional bits/grp for seconnd line of slice for native 4:2:0 + */ + u8 second_line_bpg_offset; + /** + * @nsl_bpg_offset: + * Num of bits deallocated for each grp that is not in second line of + * slice + */ + u16 nsl_bpg_offset; + /** + * @second_line_offset_adj: + * Offset adjustment for second line in Native 4:2:0 mode + */ + u16 second_line_offset_adj; +}; + +/** + * struct picture_parameter_set - Represents 128 bytes of Picture Parameter Set + * + * The VESA DSC standard defines picture parameter set (PPS) which display + * stream compression encoders must communicate to decoders. + * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in + * this structure are as per Table 4.1 in Vesa DSC specification v1.1/v1.2. + * The PPS fields that span over more than a byte should be stored in Big Endian + * format. + */ +struct drm_dsc_picture_parameter_set { + /** + * @dsc_version: + * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC + * PPS0[7:4] - dsc_version_major: Contains major version of DSC + */ + u8 dsc_version; + /** + * @pps_identifier: + * PPS1[7:0] - Application specific identifier that can be + * used to differentiate between different PPS tables. + */ + u8 pps_identifier; + /** + * @pps_reserved: + * PPS2[7:0]- RESERVED Byte + */ + u8 pps_reserved; + /** + * @pps_3: + * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to + * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits, + * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits, + * 0xE - 14 bits for DSC1.2, 0xF - 14 bits for DSC 1.2. + * PPS3[7:4] - bits_per_component: Bits per component for the original + * pixels of the encoded picture. + * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2) + * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also + * allowed only when dsc_minor_version = 0x2) + */ + u8 pps_3; + /** + * @pps_4: + * PPS4[1:0] -These are the most significant 2 bits of + * compressed BPP bits_per_pixel[9:0] syntax element. + * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled + * PPS4[3] - simple_422: Indicates if decoder drops samples to + * reconstruct the 4:2:2 picture. + * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is + * active. 
+ * PPS4[5] - blobk_pred_enable: Indicates if BP is used to code any + * groups in picture + * PPS4[7:6] - Reseved bits + */ + u8 pps_4; + /** + * @bits_per_pixel_low: + * PPS5[7:0] - This indicates the lower significant 8 bits of + * the compressed BPP bits_per_pixel[9:0] element. + */ + u8 bits_per_pixel_low; + /** + * @pic_height: + * PPS6[7:0], PPS7[7:0] -pic_height: Specifies the number of pixel rows + * within the raster. + */ + __be16 pic_height; + /** + * @pic_width: + * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within + * the raster. + */ + __be16 pic_width; + /** + * @slice_height: + * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels. + */ + __be16 slice_height; + /** + * @slice_width: + * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels. + */ + __be16 slice_width; + /** + * @chunk_size: + * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks + * that are used for slice multiplexing. + */ + __be16 chunk_size; + /** + * @initial_xmit_delay_high: + * PPS16[1:0] - Most Significant two bits of initial transmission delay. + * It specifies the number of pixel times that the encoder waits before + * transmitting data from its rate buffer. + * PPS16[7:2] - Reserved + */ + u8 initial_xmit_delay_high; + /** + * @initial_xmit_delay_low: + * PPS17[7:0] - Least significant 8 bits of initial transmission delay. + */ + u8 initial_xmit_delay_low; + /** + * @initial_dec_delay: + * + * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number + * of pixel times that the decoder accumulates data in its rate buffer + * before starting to decode and output pixels. + */ + __be16 initial_dec_delay; + /** + * @pps20_reserved: + * + * PPS20[7:0] - Reserved + */ + u8 pps20_reserved; + /** + * @initial_scale_value: + * PPS21[5:0] - Initial rcXformScale factor used at beginning + * of a slice. + * PPS21[7:6] - Reserved + */ + u8 initial_scale_value; + /** + * @scale_increment_interval: + * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing + * the rcXformScale factor at end of a slice. + */ + __be16 scale_increment_interval; + /** + * @scale_decrement_interval_high: + * PPS24[3:0] - Higher 4 bits indicating number of group times between + * decrementing the rcXformScale factor at beginning of a slice. + * PPS24[7:4] - Reserved + */ + u8 scale_decrement_interval_high; + /** + * @scale_decrement_interval_low: + * PPS25[7:0] - Lower 8 bits of scale decrement interval + */ + u8 scale_decrement_interval_low; + /** + * @pps26_reserved: + * PPS26[7:0] + */ + u8 pps26_reserved; + /** + * @first_line_bpg_offset: + * PPS27[4:0] - Number of additional bits that are allocated + * for each group on first line of a slice. + * PPS27[7:5] - Reserved + */ + u8 first_line_bpg_offset; + /** + * @nfl_bpg_offset: + * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits + * deallocated for each group for groups after the first line of slice. + */ + __be16 nfl_bpg_offset; + /** + * @slice_bpg_offset: + * PPS30, PPS31[7:0] - Number of bits that are deallocated for each + * group to enforce the slice constraint. + */ + __be16 slice_bpg_offset; + /** + * @initial_offset: + * PPS32,33[7:0] - Initial value for rcXformOffset + */ + __be16 initial_offset; + /** + * @final_offset: + * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset + */ + __be16 final_offset; + /** + * @flatness_min_qp: + * PPS36[4:0] - Minimum QP at which flatness is signaled and + * flatness QP adjustment is made. 
+ * PPS36[7:5] - Reserved + */ + u8 flatness_min_qp; + /** + * @flatness_max_qp: + * PPS37[4:0] - Max QP at which flatness is signalled and + * the flatness adjustment is made. + * PPS37[7:5] - Reserved + */ + u8 flatness_max_qp; + /** + * @rc_model_size: + * PPS38,39[7:0] - Number of bits within RC Model. + */ + __be16 rc_model_size; + /** + * @rc_edge_factor: + * PPS40[3:0] - Ratio of current activity vs, previous + * activity to determine presence of edge. + * PPS40[7:4] - Reserved + */ + u8 rc_edge_factor; + /** + * @rc_quant_incr_limit0: + * PPS41[4:0] - QP threshold used in short term RC + * PPS41[7:5] - Reserved + */ + u8 rc_quant_incr_limit0; + /** + * @rc_quant_incr_limit1: + * PPS42[4:0] - QP threshold used in short term RC + * PPS42[7:5] - Reserved + */ + u8 rc_quant_incr_limit1; + /** + * @rc_tgt_offset: + * PPS43[3:0] - Lower end of the variability range around the target + * bits per group that is allowed by short term RC. + * PPS43[7:4]- Upper end of the variability range around the target + * bits per group that i allowed by short term rc. + */ + u8 rc_tgt_offset; + /** + * @rc_buf_thresh: + * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for + * the 15 ranges defined by 14 thresholds. + */ + u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1]; + /** + * @rc_range_parameters: + * PPS58[7:0] - PPS87[7:0] + * Parameters that correspond to each of the 15 ranges. + */ + __be16 rc_range_parameters[DSC_NUM_BUF_RANGES]; + /** + * @native_422_420: + * PPS88[0] - 0 = Native 4:2:2 not used + * 1 = Native 4:2:2 used + * PPS88[1] - 0 = Native 4:2:0 not use + * 1 = Native 4:2:0 used + * PPS88[7:2] - Reserved 6 bits + */ + u8 native_422_420; + /** + * @second_line_bpg_offset: + * PPS89[4:0] - Additional bits/group budget for the + * second line of a slice in Native 4:2:0 mode. + * Set to 0 if DSC minor version is 1 or native420 is 0. + * PPS89[7:5] - Reserved + */ + u8 second_line_bpg_offset; + /** + * @nsl_bpg_offset: + * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated + * for each group that is not in the second line of a slice. + */ + __be16 nsl_bpg_offset; + /** + * @second_line_offset_adj: + * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second + * line in Native 4:2:0 mode. + */ + __be16 second_line_offset_adj; + /** + * @pps_long_94_reserved: + * PPS 94, 95, 96, 97 - Reserved + */ + u32 pps_long_94_reserved; + /** + * @pps_long_98_reserved: + * PPS 98, 99, 100, 101 - Reserved + */ + u32 pps_long_98_reserved; + /** + * @pps_long_102_reserved: + * PPS 102, 103, 104, 105 - Reserved + */ + u32 pps_long_102_reserved; + /** + * @pps_long_106_reserved: + * PPS 106, 107, 108, 109 - reserved + */ + u32 pps_long_106_reserved; + /** + * @pps_long_110_reserved: + * PPS 110, 111, 112, 113 - reserved + */ + u32 pps_long_110_reserved; + /** + * @pps_long_114_reserved: + * PPS 114 - 117 - reserved + */ + u32 pps_long_114_reserved; + /** + * @pps_long_118_reserved: + * PPS 118 - 121 - reserved + */ + u32 pps_long_118_reserved; + /** + * @pps_long_122_reserved: + * PPS 122- 125 - reserved + */ + u32 pps_long_122_reserved; + /** + * @pps_short_126_reserved: + * PPS 126, 127 - reserved + */ + __be16 pps_short_126_reserved; +} __packed; + +/** + * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter + * Set Metadata + * + * This structure represents the DSC PPS infoframe required to send the Picture + * Parameter Set metadata required before enabling VESA Display Stream + * Compression. 
This is based on the DP Secondary Data Packet structure and
+ * comprises the SDP header as defined in &struct dp_sdp_header in
+ * drm_dp_helper.h and the PPS payload defined in
+ * &struct drm_dsc_picture_parameter_set.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format of type
+ * &struct dp_sdp_header
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ * as represented in &struct drm_dsc_picture_parameter_set
+ */
+struct drm_dsc_pps_infoframe {
+	struct dp_sdp_header pps_header;
+	struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
+			      const struct drm_dsc_config *dsc_cfg);
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
+
+#endif /* _DRM_DSC_H_ */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
new file mode 100644
index 000000000..4526b6a1e
--- /dev/null
+++ b/include/drm/drm_edid.h
@@ -0,0 +1,517 @@
+/*
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef __DRM_EDID_H__ +#define __DRM_EDID_H__ + +#include <linux/types.h> +#include <linux/hdmi.h> +#include <drm/drm_mode.h> + +struct drm_device; +struct i2c_adapter; + +#define EDID_LENGTH 128 +#define DDC_ADDR 0x50 +#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */ + +#define CEA_EXT 0x02 +#define VTB_EXT 0x10 +#define DI_EXT 0x40 +#define LS_EXT 0x50 +#define MI_EXT 0x60 +#define DISPLAYID_EXT 0x70 + +struct est_timings { + u8 t1; + u8 t2; + u8 mfg_rsvd; +} __attribute__((packed)); + +/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */ +#define EDID_TIMING_ASPECT_SHIFT 6 +#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT) + +/* need to add 60 */ +#define EDID_TIMING_VFREQ_SHIFT 0 +#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT) + +struct std_timing { + u8 hsize; /* need to multiply by 8 then add 248 */ + u8 vfreq_aspect; +} __attribute__((packed)); + +#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) +#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3) +#define DRM_EDID_PT_STEREO (1 << 5) +#define DRM_EDID_PT_INTERLACED (1 << 7) + +/* If detailed data is pixel timing */ +struct detailed_pixel_timing { + u8 hactive_lo; + u8 hblank_lo; + u8 hactive_hblank_hi; + u8 vactive_lo; + u8 vblank_lo; + u8 vactive_vblank_hi; + u8 hsync_offset_lo; + u8 hsync_pulse_width_lo; + u8 vsync_offset_pulse_width_lo; + u8 hsync_vsync_offset_pulse_width_hi; + u8 width_mm_lo; + u8 height_mm_lo; + u8 width_height_mm_hi; + u8 hborder; + u8 vborder; + u8 misc; +} __attribute__((packed)); + +/* If it's not pixel timing, it'll be one of the below */ +struct detailed_data_string { + u8 str[13]; +} __attribute__((packed)); + +#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00 +#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01 +#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02 +#define DRM_EDID_CVT_SUPPORT_FLAG 0x04 + +struct detailed_data_monitor_range { + u8 min_vfreq; + u8 max_vfreq; + u8 min_hfreq_khz; + u8 max_hfreq_khz; + u8 pixel_clock_mhz; /* need to multiply by 10 */ + u8 flags; + union { + struct { + u8 reserved; + u8 hfreq_start_khz; /* need to multiply by 2 */ + u8 c; /* need to divide by 2 */ + __le16 m; + u8 k; + u8 j; /* need to divide by 2 */ + } __attribute__((packed)) gtf2; + struct { + u8 version; + u8 data1; /* high 6 bits: extra clock resolution */ + u8 data2; /* plus low 2 of above: max hactive */ + u8 supported_aspects; + u8 flags; /* preferred aspect and blanking support */ + u8 supported_scalings; + u8 preferred_refresh; + } __attribute__((packed)) cvt; + } __attribute__((packed)) formula; +} __attribute__((packed)); + +struct detailed_data_wpindex { + u8 white_yx_lo; /* Lower 2 bits each */ + u8 white_x_hi; + u8 white_y_hi; + u8 gamma; /* need to divide by 100 then add 1 */ +} __attribute__((packed)); + +struct detailed_data_color_point { + u8 windex1; + u8 wpindex1[3]; + u8 windex2; + u8 wpindex2[3]; +} __attribute__((packed)); + +struct cvt_timing { + u8 code[3]; +} __attribute__((packed)); + +struct detailed_non_pixel { + u8 pad1; + u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name + fb=color point data, fa=standard timing data, + f9=undefined, f8=mfg. 
reserved */ + u8 pad2; + union { + struct detailed_data_string str; + struct detailed_data_monitor_range range; + struct detailed_data_wpindex color; + struct std_timing timings[6]; + struct cvt_timing cvt[4]; + } __attribute__((packed)) data; +} __attribute__((packed)); + +#define EDID_DETAIL_EST_TIMINGS 0xf7 +#define EDID_DETAIL_CVT_3BYTE 0xf8 +#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9 +#define EDID_DETAIL_STD_MODES 0xfa +#define EDID_DETAIL_MONITOR_CPDATA 0xfb +#define EDID_DETAIL_MONITOR_NAME 0xfc +#define EDID_DETAIL_MONITOR_RANGE 0xfd +#define EDID_DETAIL_MONITOR_STRING 0xfe +#define EDID_DETAIL_MONITOR_SERIAL 0xff + +struct detailed_timing { + __le16 pixel_clock; /* need to multiply by 10 KHz */ + union { + struct detailed_pixel_timing pixel_data; + struct detailed_non_pixel other_data; + } __attribute__((packed)) data; +} __attribute__((packed)); + +#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0) +#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1) +#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2) +#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3) +#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) +#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) +#define DRM_EDID_INPUT_DIGITAL (1 << 7) +#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_MASK (7 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_UNDEF (0 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_DVI (1 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_MDDI (4 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */ + +#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) +#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) +#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2) +/* If analog */ +#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */ +/* If digital */ +#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3) +#define DRM_EDID_FEATURE_RGB (0 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3) +#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */ + +#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5) +#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) +#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) + +#define DRM_EDID_HDMI_DC_48 (1 << 6) +#define DRM_EDID_HDMI_DC_36 (1 << 5) +#define DRM_EDID_HDMI_DC_30 (1 << 4) +#define DRM_EDID_HDMI_DC_Y444 (1 << 3) + +/* YCBCR 420 deep color modes */ +#define DRM_EDID_YCBCR420_DC_48 (1 << 2) +#define DRM_EDID_YCBCR420_DC_36 (1 << 1) +#define DRM_EDID_YCBCR420_DC_30 (1 << 0) +#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ + DRM_EDID_YCBCR420_DC_36 | \ + DRM_EDID_YCBCR420_DC_30) + +/* ELD Header Block */ +#define DRM_ELD_HEADER_BLOCK_SIZE 4 + +#define DRM_ELD_VER 0 +# define DRM_ELD_VER_SHIFT 3 +# define DRM_ELD_VER_MASK (0x1f << 3) +# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ +# define DRM_ELD_VER_CANNED (0x1f << 3) + +#define DRM_ELD_BASELINE_ELD_LEN 2 /* in 
dwords! */ + +/* ELD Baseline Block for ELD_Ver == 2 */ +#define DRM_ELD_CEA_EDID_VER_MNL 4 +# define DRM_ELD_CEA_EDID_VER_SHIFT 5 +# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) +# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) +# define DRM_ELD_MNL_SHIFT 0 +# define DRM_ELD_MNL_MASK (0x1f << 0) + +#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 +# define DRM_ELD_SAD_COUNT_SHIFT 4 +# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) +# define DRM_ELD_CONN_TYPE_SHIFT 2 +# define DRM_ELD_CONN_TYPE_MASK (3 << 2) +# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) +# define DRM_ELD_CONN_TYPE_DP (1 << 2) +# define DRM_ELD_SUPPORTS_AI (1 << 1) +# define DRM_ELD_SUPPORTS_HDCP (1 << 0) + +#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ +# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ + +#define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f +# define DRM_ELD_SPEAKER_RLRC (1 << 6) +# define DRM_ELD_SPEAKER_FLRC (1 << 5) +# define DRM_ELD_SPEAKER_RC (1 << 4) +# define DRM_ELD_SPEAKER_RLR (1 << 3) +# define DRM_ELD_SPEAKER_FC (1 << 2) +# define DRM_ELD_SPEAKER_LFE (1 << 1) +# define DRM_ELD_SPEAKER_FLR (1 << 0) + +#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ +# define DRM_ELD_PORT_ID_LEN 8 + +#define DRM_ELD_MANUFACTURER_NAME0 16 +#define DRM_ELD_MANUFACTURER_NAME1 17 + +#define DRM_ELD_PRODUCT_CODE0 18 +#define DRM_ELD_PRODUCT_CODE1 19 + +#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ + +#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) + +struct edid { + u8 header[8]; + /* Vendor & product info */ + u8 mfg_id[2]; + u8 prod_code[2]; + u32 serial; /* FIXME: byte order */ + u8 mfg_week; + u8 mfg_year; + /* EDID version */ + u8 version; + u8 revision; + /* Display info: */ + u8 input; + u8 width_cm; + u8 height_cm; + u8 gamma; + u8 features; + /* Color characteristics */ + u8 red_green_lo; + u8 black_white_lo; + u8 red_x; + u8 red_y; + u8 green_x; + u8 green_y; + u8 blue_x; + u8 blue_y; + u8 white_x; + u8 white_y; + /* Est. timings and mfg rsvd timings*/ + struct est_timings established_timings; + /* Standard timings 1-8*/ + struct std_timing standard_timings[8]; + /* Detailing timings 1-4 */ + struct detailed_timing detailed_timings[4]; + /* Number of 128 byte ext. 
blocks */ + u8 extensions; + /* Checksum */ + u8 checksum; +} __attribute__((packed)); + +#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) + +/* Short Audio Descriptor */ +struct cea_sad { + u8 format; + u8 channels; /* max number of channels - 1 */ + u8 freq; + u8 byte2; /* meaning depends on format */ +}; + +struct drm_encoder; +struct drm_connector; +struct drm_connector_state; +struct drm_display_mode; + +int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); +int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); +int drm_av_sync_delay(struct drm_connector *connector, + const struct drm_display_mode *mode); + +#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE +struct edid *drm_load_edid_firmware(struct drm_connector *connector); +int __drm_set_edid_firmware_path(const char *path); +int __drm_get_edid_firmware_path(char *buf, size_t bufsize); +#else +static inline struct edid * +drm_load_edid_firmware(struct drm_connector *connector) +{ + return ERR_PTR(-ENOENT); +} +#endif + +bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2); + +int +drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, + const struct drm_connector *connector, + const struct drm_display_mode *mode); +int +drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, + const struct drm_connector *connector, + const struct drm_display_mode *mode); + +void +drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +void +drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); + +void +drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, + const struct drm_connector *connector, + const struct drm_display_mode *mode, + enum hdmi_quantization_range rgb_quant_range); + +int +drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, + const struct drm_connector_state *conn_state); + +/** + * drm_eld_mnl - Get ELD monitor name length in bytes. + * @eld: pointer to an eld memory structure with mnl set + */ +static inline int drm_eld_mnl(const uint8_t *eld) +{ + return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; +} + +/** + * drm_eld_sad - Get ELD SAD structures. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline const uint8_t *drm_eld_sad(const uint8_t *eld) +{ + unsigned int ver, mnl; + + ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; + if (ver != 2 && ver != 31) + return NULL; + + mnl = drm_eld_mnl(eld); + if (mnl > 16) + return NULL; + + return eld + DRM_ELD_CEA_SAD(mnl, 0); +} + +/** + * drm_eld_sad_count - Get ELD SAD count. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline int drm_eld_sad_count(const uint8_t *eld) +{ + return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> + DRM_ELD_SAD_COUNT_SHIFT; +} + +/** + * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes + * @eld: pointer to an eld memory structure with mnl and sad_count set + * + * This is a helper for determining the payload size of the baseline block, in + * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. 
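+ *
+ * Worked example (values chosen purely for illustration): with an 8-byte
+ * monitor name (mnl = 8) and two SADs, the baseline payload comes to
+ * (20 - 4) + 8 + 2 * 3 = 30 bytes, i.e. the 16 fixed baseline bytes plus
+ * the monitor name string plus 3 bytes for each Short Audio Descriptor.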
+ */ +static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) +{ + return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; +} + +/** + * drm_eld_size - Get ELD size in bytes + * @eld: pointer to a complete eld memory structure + * + * The returned value does not include the vendor block. It's vendor specific, + * and comprises of the remaining bytes in the ELD memory buffer after + * drm_eld_size() bytes of header and baseline block. + * + * The returned value is guaranteed to be a multiple of 4. + */ +static inline int drm_eld_size(const uint8_t *eld) +{ + return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; +} + +/** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. + */ +static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** + * drm_eld_get_conn_type - Get device type hdmi/dp connected + * @eld: pointer to an ELD memory structure + * + * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to + * identify the display type connected. + */ +static inline u8 drm_eld_get_conn_type(const uint8_t *eld) +{ + return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; +} + +bool drm_probe_ddc(struct i2c_adapter *adapter); +struct edid *drm_do_get_edid(struct drm_connector *connector, + int (*get_edid_block)(void *data, u8 *buf, unsigned int block, + size_t len), + void *data); +struct edid *drm_get_edid(struct drm_connector *connector, + struct i2c_adapter *adapter); +struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, + struct i2c_adapter *adapter); +struct edid *drm_edid_duplicate(const struct edid *edid); +int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); +int drm_add_override_edid_modes(struct drm_connector *connector); + +u8 drm_match_cea_mode(const struct drm_display_mode *to_match); +bool drm_detect_hdmi_monitor(struct edid *edid); +bool drm_detect_monitor_audio(struct edid *edid); +enum hdmi_quantization_range +drm_default_rgb_quant_range(const struct drm_display_mode *mode); +int drm_add_modes_noedid(struct drm_connector *connector, + int hdisplay, int vdisplay); +void drm_set_preferred_mode(struct drm_connector *connector, + int hpref, int vpref); + +int drm_edid_header_is_valid(const u8 *raw_edid); +bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid, + bool *edid_corrupt); +bool drm_edid_is_valid(struct edid *edid); +void drm_edid_get_monitor_name(struct edid *edid, char *name, + int buflen); +struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, + int hsize, int vsize, int fresh, + bool rb); +struct drm_display_mode * +drm_display_mode_from_cea_vic(struct drm_device *dev, + u8 video_code); + +#endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h new file mode 100644 index 000000000..a60f5f155 --- /dev/null +++ b/include/drm/drm_encoder.h @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and 
+ * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_ENCODER_H__ +#define __DRM_ENCODER_H__ + +#include <linux/list.h> +#include <linux/ctype.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mode.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_util.h> + +struct drm_encoder; + +/** + * struct drm_encoder_funcs - encoder controls + * + * Encoders sit between CRTCs and connectors. + */ +struct drm_encoder_funcs { + /** + * @reset: + * + * Reset encoder hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + */ + void (*reset)(struct drm_encoder *encoder); + + /** + * @destroy: + * + * Clean up encoder resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since an encoder cannot be + * hotplugged in DRM. + */ + void (*destroy)(struct drm_encoder *encoder); + + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the encoder like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_encoder *encoder); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the encoder from + * @late_register. It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + */ + void (*early_unregister)(struct drm_encoder *encoder); +}; + +/** + * struct drm_encoder - central DRM encoder structure + * @dev: parent DRM device + * @head: list management + * @base: base KMS object + * @name: human readable name, can be overwritten by the driver + * @bridge: bridge associated to the encoder + * @funcs: control functions + * @helper_private: mid-layer private data + * + * CRTCs drive pixels to encoders, which convert them into signals + * appropriate for a given connector or set of connectors. + */ +struct drm_encoder { + struct drm_device *dev; + struct list_head head; + + struct drm_mode_object base; + char *name; + /** + * @encoder_type: + * + * One of the DRM_MODE_ENCODER_<foo> types in drm_mode.h. The following + * encoder types are defined thus far: + * + * - DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A. + * + * - DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort. 
+ * + * - DRM_MODE_ENCODER_LVDS for display panels, or in general any panel + * with a proprietary parallel connector. + * + * - DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, + * Component, SCART). + * + * - DRM_MODE_ENCODER_VIRTUAL for virtual machine displays + * + * - DRM_MODE_ENCODER_DSI for panels connected using the DSI serial bus. + * + * - DRM_MODE_ENCODER_DPI for panels connected using the DPI parallel + * bus. + * + * - DRM_MODE_ENCODER_DPMST for special fake encoders used to allow + * mutliple DP MST streams to share one physical encoder. + */ + int encoder_type; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the encoder. + */ + unsigned index; + + /** + * @possible_crtcs: Bitmask of potential CRTC bindings, using + * drm_crtc_index() as the index into the bitfield. The driver must set + * the bits for all &drm_crtc objects this encoder can be connected to + * before calling drm_dev_register(). + * + * You will get a WARN if you get this wrong in the driver. + * + * Note that since CRTC objects can't be hotplugged the assigned indices + * are stable and hence known before registering all objects. + */ + uint32_t possible_crtcs; + + /** + * @possible_clones: Bitmask of potential sibling encoders for cloning, + * using drm_encoder_index() as the index into the bitfield. The driver + * must set the bits for all &drm_encoder objects which can clone a + * &drm_crtc together with this encoder before calling + * drm_dev_register(). Drivers should set the bit representing the + * encoder itself, too. Cloning bits should be set such that when two + * encoders can be used in a cloned configuration, they both should have + * each another bits set. + * + * As an exception to the above rule if the driver doesn't implement + * any cloning it can leave @possible_clones set to 0. The core will + * automagically fix this up by setting the bit for the encoder itself. + * + * You will get a WARN if you get this wrong in the driver. + * + * Note that since encoder objects can't be hotplugged the assigned indices + * are stable and hence known before registering all objects. + */ + uint32_t possible_clones; + + /** + * @crtc: Currently bound CRTC, only really meaningful for non-atomic + * drivers. Atomic drivers should instead check + * &drm_connector_state.crtc. + */ + struct drm_crtc *crtc; + + /** + * @bridge_chain: Bridges attached to this encoder. Drivers shall not + * access this field directly. + */ + struct list_head bridge_chain; + + const struct drm_encoder_funcs *funcs; + const struct drm_encoder_helper_funcs *helper_private; +}; + +#define obj_to_encoder(x) container_of(x, struct drm_encoder, base) + +__printf(5, 6) +int drm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...); + +/** + * drm_encoder_index - find the index of a registered encoder + * @encoder: encoder to find index for + * + * Given a registered encoder, return the index of that encoder within a DRM + * device's list of encoders. + */ +static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder) +{ + return encoder->index; +} + +/** + * drm_encoder_mask - find the mask of a registered encoder + * @encoder: encoder to find mask for + * + * Given a registered encoder, return the mask bit of that encoder for an + * encoder's possible_clones field. 
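+ *
+ * A minimal sketch of how a driver might build its cloning masks with this
+ * helper, assuming two hypothetical encoder pointers tmds and dac that can
+ * be cloned with each other::
+ *
+ *     tmds->possible_clones = drm_encoder_mask(tmds) | drm_encoder_mask(dac);
+ *     dac->possible_clones = drm_encoder_mask(dac) | drm_encoder_mask(tmds);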
+ */ +static inline u32 drm_encoder_mask(const struct drm_encoder *encoder) +{ + return 1 << drm_encoder_index(encoder); +} + +/** + * drm_encoder_crtc_ok - can a given crtc drive a given encoder? + * @encoder: encoder to test + * @crtc: crtc to test + * + * Returns false if @encoder can't be driven by @crtc, true otherwise. + */ +static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, + struct drm_crtc *crtc) +{ + return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); +} + +/** + * drm_encoder_find - find a &drm_encoder + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: encoder id + * + * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around + * drm_mode_object_find(). + */ +static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_ENCODER); + + return mo ? obj_to_encoder(mo) : NULL; +} + +void drm_encoder_cleanup(struct drm_encoder *encoder); + +/** + * drm_for_each_encoder_mask - iterate over encoders specified by bitmask + * @encoder: the loop cursor + * @dev: the DRM device + * @encoder_mask: bitmask of encoder indices + * + * Iterate over all encoders specified by bitmask. + */ +#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \ + list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \ + for_each_if ((encoder_mask) & drm_encoder_mask(encoder)) + +/** + * drm_for_each_encoder - iterate over all encoders + * @encoder: the loop cursor + * @dev: the DRM device + * + * Iterate over all encoders of @dev. + */ +#define drm_for_each_encoder(encoder, dev) \ + list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head) + +#endif diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h new file mode 100644 index 000000000..a09864f6d --- /dev/null +++ b/include/drm/drm_encoder_slave.h @@ -0,0 +1,182 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_ENCODER_SLAVE_H__ +#define __DRM_ENCODER_SLAVE_H__ + +#include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> + +/** + * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver + * @set_config: Initialize any encoder-specific modesetting parameters. 
+ * The meaning of the @params parameter is implementation + * dependent. It will usually be a structure with DVO port + * data format settings or timings. It's not required for + * the new parameters to take effect until the next mode + * is set. + * + * Most of its members are analogous to the function pointers in + * &drm_encoder_helper_funcs and they can optionally be used to + * initialize the latter. Connector-like methods (e.g. @get_modes and + * @set_property) will typically be wrapped around and only be called + * if the encoder is the currently selected one for the connector. + */ +struct drm_encoder_slave_funcs { + void (*set_config)(struct drm_encoder *encoder, + void *params); + + void (*destroy)(struct drm_encoder *encoder); + void (*dpms)(struct drm_encoder *encoder, int mode); + void (*save)(struct drm_encoder *encoder); + void (*restore)(struct drm_encoder *encoder); + bool (*mode_fixup)(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + int (*mode_valid)(struct drm_encoder *encoder, + struct drm_display_mode *mode); + void (*mode_set)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*get_modes)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*create_resources)(struct drm_encoder *encoder, + struct drm_connector *connector); + int (*set_property)(struct drm_encoder *encoder, + struct drm_connector *connector, + struct drm_property *property, + uint64_t val); + +}; + +/** + * struct drm_encoder_slave - Slave encoder struct + * @base: DRM encoder object. + * @slave_funcs: Slave encoder callbacks. + * @slave_priv: Slave encoder private data. + * @bus_priv: Bus specific data. + * + * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the + * ones in @base. The former are never actually called by the common + * CRTC code, it's just a convenience for splitting the encoder + * functions in an upper, GPU-specific layer and a (hopefully) + * GPU-agnostic lower layer: It's the GPU driver responsibility to + * call the slave methods when appropriate. + * + * drm_i2c_encoder_init() provides a way to get an implementation of + * this. + */ +struct drm_encoder_slave { + struct drm_encoder base; + + const struct drm_encoder_slave_funcs *slave_funcs; + void *slave_priv; + void *bus_priv; +}; +#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base) + +int drm_i2c_encoder_init(struct drm_device *dev, + struct drm_encoder_slave *encoder, + struct i2c_adapter *adap, + const struct i2c_board_info *info); + + +/** + * struct drm_i2c_encoder_driver + * + * Describes a device driver for an encoder connected to the GPU + * through an I2C bus. In addition to the entry points in @i2c_driver + * an @encoder_init function should be provided. It will be called to + * give the driver an opportunity to allocate any per-encoder data + * structures and to initialize the @slave_funcs and (optionally) + * @slave_priv members of @encoder. 
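+ *
+ * A minimal sketch of such a driver; the foo_* names are hypothetical and
+ * foo_slave_funcs is assumed to be a &struct drm_encoder_slave_funcs
+ * defined elsewhere::
+ *
+ *     static int foo_encoder_init(struct i2c_client *client,
+ *                                 struct drm_device *dev,
+ *                                 struct drm_encoder_slave *encoder)
+ *     {
+ *         encoder->slave_funcs = &foo_slave_funcs;
+ *         return 0;
+ *     }
+ *
+ *     static struct drm_i2c_encoder_driver foo_driver = {
+ *         .i2c_driver = {
+ *             .driver = { .name = "foo" },
+ *         },
+ *         .encoder_init = foo_encoder_init,
+ *     };
+ *
+ * Such a driver would then typically be registered with
+ * drm_i2c_encoder_register().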
+ */ +struct drm_i2c_encoder_driver { + struct i2c_driver i2c_driver; + + int (*encoder_init)(struct i2c_client *client, + struct drm_device *dev, + struct drm_encoder_slave *encoder); + +}; +#define to_drm_i2c_encoder_driver(x) container_of((x), \ + struct drm_i2c_encoder_driver, \ + i2c_driver) + +/** + * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder + */ +static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder) +{ + return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv; +} + +/** + * drm_i2c_encoder_register - Register an I2C encoder driver + * @owner: Module containing the driver. + * @driver: Driver to be registered. + */ +static inline int drm_i2c_encoder_register(struct module *owner, + struct drm_i2c_encoder_driver *driver) +{ + return i2c_register_driver(owner, &driver->i2c_driver); +} + +/** + * drm_i2c_encoder_unregister - Unregister an I2C encoder driver + * @driver: Driver to be unregistered. + */ +static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver) +{ + i2c_del_driver(&driver->i2c_driver); +} + +void drm_i2c_encoder_destroy(struct drm_encoder *encoder); + + +/* + * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: + */ + +void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode); +bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void drm_i2c_encoder_prepare(struct drm_encoder *encoder); +void drm_i2c_encoder_commit(struct drm_encoder *encoder); +void drm_i2c_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder, + struct drm_connector *connector); +void drm_i2c_encoder_save(struct drm_encoder *encoder); +void drm_i2c_encoder_restore(struct drm_encoder *encoder); + + +#endif diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h new file mode 100644 index 000000000..795aea1d0 --- /dev/null +++ b/include/drm/drm_fb_cma_helper.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_FB_CMA_HELPER_H__ +#define __DRM_FB_CMA_HELPER_H__ + +#include <linux/types.h> + +struct drm_framebuffer; +struct drm_plane_state; + +struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, + unsigned int plane); + +dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, + struct drm_plane_state *state, + unsigned int plane); + +#endif + diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h new file mode 100644 index 000000000..306aa3a60 --- /dev/null +++ b/include/drm/drm_fb_helper.h @@ -0,0 +1,504 @@ +/* + * Copyright (c) 2006-2009 Red Hat Inc. + * Copyright (c) 2006-2008 Intel Corporation + * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> + * + * DRM framebuffer helper functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. 
The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + * + * Authors: + * Dave Airlie <airlied@linux.ie> + * Jesse Barnes <jesse.barnes@intel.com> + */ +#ifndef DRM_FB_HELPER_H +#define DRM_FB_HELPER_H + +struct drm_fb_helper; + +#include <drm/drm_client.h> +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <linux/kgdb.h> +#include <linux/vgaarb.h> + +enum mode_set_atomic { + LEAVE_ATOMIC_MODE_SET, + ENTER_ATOMIC_MODE_SET, +}; + +/** + * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size + * @fb_width: fbdev width + * @fb_height: fbdev height + * @surface_width: scanout buffer width + * @surface_height: scanout buffer height + * @surface_bpp: scanout buffer bpp + * @surface_depth: scanout buffer depth + * + * Note that the scanout surface width/height may be larger than the fbdev + * width/height. In case of multiple displays, the scanout surface is sized + * according to the largest width/height (so it is large enough for all CRTCs + * to scanout). But the fbdev width/height is sized to the minimum width/ + * height of all the displays. This ensures that fbcon fits on the smallest + * of the attached displays. fb_width/fb_height is used by + * drm_fb_helper_fill_info() to fill out the &fb_info.var structure. + */ +struct drm_fb_helper_surface_size { + u32 fb_width; + u32 fb_height; + u32 surface_width; + u32 surface_height; + u32 surface_bpp; + u32 surface_depth; +}; + +/** + * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library + * + * Driver callbacks used by the fbdev emulation helper library. + */ +struct drm_fb_helper_funcs { + /** + * @fb_probe: + * + * Driver callback to allocate and initialize the fbdev info structure. + * Furthermore it also needs to allocate the DRM framebuffer used to + * back the fbdev. + * + * This callback is mandatory. + * + * RETURNS: + * + * The driver should return 0 on success and a negative error code on + * failure. + */ + int (*fb_probe)(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +}; + +/** + * struct drm_fb_helper - main structure to emulate fbdev on top of KMS + * @fb: Scanout framebuffer object + * @dev: DRM device + * @funcs: driver callbacks for fb helper + * @fbdev: emulated fbdev device info struct + * @pseudo_palette: fake palette of 16 colors + * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to + * the screen buffer + * @dirty_lock: spinlock protecting @dirty_clip + * @dirty_work: worker used to flush the framebuffer + * @resume_work: worker used during resume if the console lock is already taken + * + * This is the main structure used by the fbdev helpers. Drivers supporting + * fbdev emulation should embedded this into their overall driver structure. + * Drivers must also fill out a &struct drm_fb_helper_funcs with a few + * operations. 
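+ *
+ * A minimal sketch of that embedding; the foo_* names are hypothetical and
+ * foo_fbdev_probe is assumed to be the driver's @fb_probe implementation::
+ *
+ *     struct foo_fbdev {
+ *         struct drm_fb_helper helper;
+ *         struct foo_device *foo;
+ *     };
+ *
+ *     static const struct drm_fb_helper_funcs foo_fb_helper_funcs = {
+ *         .fb_probe = foo_fbdev_probe,
+ *     };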
+ */ +struct drm_fb_helper { + /** + * @client: + * + * DRM client used by the generic fbdev emulation. + */ + struct drm_client_dev client; + + /** + * @buffer: + * + * Framebuffer used by the generic fbdev emulation. + */ + struct drm_client_buffer *buffer; + + struct drm_framebuffer *fb; + struct drm_device *dev; + const struct drm_fb_helper_funcs *funcs; + struct fb_info *fbdev; + u32 pseudo_palette[17]; + struct drm_clip_rect dirty_clip; + spinlock_t dirty_lock; + struct work_struct dirty_work; + struct work_struct resume_work; + + /** + * @lock: + * + * Top-level FBDEV helper lock. This protects all internal data + * structures and lists, such as @connector_info and @crtc_info. + * + * FIXME: fbdev emulation locking is a mess and long term we want to + * protect all helper internal state with this lock as well as reduce + * core KMS locking as much as possible. + */ + struct mutex lock; + + /** + * @kernel_fb_list: + * + * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit. + */ + struct list_head kernel_fb_list; + + /** + * @delayed_hotplug: + * + * A hotplug was received while fbdev wasn't in control of the DRM + * device, i.e. another KMS master was active. The output configuration + * needs to be reprobe when fbdev is in control again. + */ + bool delayed_hotplug; + + /** + * @deferred_setup: + * + * If no outputs are connected (disconnected or unknown) the FB helper + * code will defer setup until at least one of the outputs shows up. + * This field keeps track of the status so that setup can be retried + * at every hotplug event until it succeeds eventually. + * + * Protected by @lock. + */ + bool deferred_setup; + + /** + * @preferred_bpp: + * + * Temporary storage for the driver's preferred BPP setting passed to + * FB helper initialization. This needs to be tracked so that deferred + * FB helper setup can pass this on. + * + * See also: @deferred_setup + */ + int preferred_bpp; +}; + +static inline struct drm_fb_helper * +drm_fb_helper_from_client(struct drm_client_dev *client) +{ + return container_of(client, struct drm_fb_helper, client); +} + +/** + * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers + * + * Helper define to register default implementations of drm_fb_helper + * functions. To be used in struct fb_ops of drm drivers. 
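+ *
+ * For example, a driver's &struct fb_ops could be declared as below;
+ * foo_fb_ops is a hypothetical name, and the cfb helpers used here are
+ * declared further down in this header::
+ *
+ *     static const struct fb_ops foo_fb_ops = {
+ *         .owner = THIS_MODULE,
+ *         DRM_FB_HELPER_DEFAULT_OPS,
+ *         .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ *         .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ *         .fb_imageblit = drm_fb_helper_cfb_imageblit,
+ *     };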
+ */ +#define DRM_FB_HELPER_DEFAULT_OPS \ + .fb_check_var = drm_fb_helper_check_var, \ + .fb_set_par = drm_fb_helper_set_par, \ + .fb_setcmap = drm_fb_helper_setcmap, \ + .fb_blank = drm_fb_helper_blank, \ + .fb_pan_display = drm_fb_helper_pan_display, \ + .fb_debug_enter = drm_fb_helper_debug_enter, \ + .fb_debug_leave = drm_fb_helper_debug_leave, \ + .fb_ioctl = drm_fb_helper_ioctl + +#ifdef CONFIG_DRM_FBDEV_EMULATION +void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs); +int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper); +void drm_fb_helper_fini(struct drm_fb_helper *helper); +int drm_fb_helper_blank(int blank, struct fb_info *info); +int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info); +int drm_fb_helper_set_par(struct fb_info *info); +int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); + +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); + +struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_fill_info(struct fb_info *info, + struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +void drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist); + +ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); +ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); + +void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect); +void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area); +void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image); + +void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect); +void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area); +void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image); + +void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend); +void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, + bool suspend); + +int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); + +int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg); + +int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); +int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); +int drm_fb_helper_debug_enter(struct fb_info *info); +int drm_fb_helper_debug_leave(struct fb_info *info); + +void drm_fb_helper_lastclose(struct drm_device *dev); +void drm_fb_helper_output_poll_changed(struct drm_device *dev); + +void drm_fbdev_generic_setup(struct drm_device *dev, + unsigned int preferred_bpp); +#else +static inline void drm_fb_helper_prepare(struct drm_device *dev, + struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs) +{ +} + +static inline int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *helper) +{ + /* So drivers can use it to free the struct */ + helper->dev = dev; + dev->fb_helper = helper; + + return 0; +} + +static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) +{ + if (helper && helper->dev) + helper->dev->fb_helper = NULL; +} + +static inline int drm_fb_helper_blank(int blank, struct fb_info *info) 
+{ + return 0; +} + +static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_set_par(struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static inline int +drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline struct fb_info * +drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) +{ + return NULL; +} + +static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) +{ +} + +static inline void +drm_fb_helper_fill_info(struct fb_info *info, + struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ +} + +static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap, + struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + return 0; +} + +static inline void drm_fb_helper_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ +} + +static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) +{ + return -ENODEV; +} + +static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, + char __user *buf, size_t count, + loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return -ENODEV; +} + +static inline void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ +} + +static inline void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ +} + +static inline void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image) +{ +} + +static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ +} + +static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ +} + +static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ +} + +static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, + bool suspend) +{ +} + +static inline void +drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend) +{ +} + +static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, + int bpp_sel) +{ + return 0; +} + +static inline int drm_fb_helper_debug_enter(struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_debug_leave(struct fb_info *info) +{ + return 0; +} + +static inline void drm_fb_helper_lastclose(struct drm_device *dev) +{ +} + +static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) +{ +} + +static inline void +drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) +{ +} + +#endif + +/** + * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers + * @a: memory range, users of which are to be removed + * @name: requesting driver name + * @primary: also kick vga16fb if present + * + * This function removes framebuffer devices (initialized by firmware/bootloader) + * which use memory range described by @a. If @a is NULL all such devices are + * removed. 
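+ *
+ * A typical call from a driver's probe path could look like the sketch
+ * below; fb_base and fb_size stand in for the physical base address and
+ * size of the scanout aperture, and "foodrmfb" is a placeholder name::
+ *
+ *     struct apertures_struct *ap = alloc_apertures(1);
+ *
+ *     if (!ap)
+ *         return -ENOMEM;
+ *     ap->ranges[0].base = fb_base;
+ *     ap->ranges[0].size = fb_size;
+ *     drm_fb_helper_remove_conflicting_framebuffers(ap, "foodrmfb", false);
+ *     kfree(ap);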
+ */ +static inline int +drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, + const char *name, bool primary) +{ +#if IS_REACHABLE(CONFIG_FB) + return remove_conflicting_framebuffers(a, name, primary); +#else + return 0; +#endif +} + +/** + * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices + * @pdev: PCI device + * @name: requesting driver name + * + * This function removes framebuffer devices (eg. initialized by firmware) + * using memory range configured for any of @pdev's memory bars. + * + * The function assumes that PCI device with shadowed ROM drives a primary + * display and so kicks out vga16fb. + */ +static inline int +drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, + const char *name) +{ + int ret = 0; + + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. + */ +#if IS_REACHABLE(CONFIG_FB) + ret = remove_conflicting_pci_framebuffers(pdev, name); +#endif + if (ret == 0) + ret = vga_remove_vgacon(pdev); + return ret; +} + +#endif diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h new file mode 100644 index 000000000..42d04607d --- /dev/null +++ b/include/drm/drm_file.h @@ -0,0 +1,426 @@ +/* + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_FILE_H_ +#define _DRM_FILE_H_ + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/idr.h> + +#include <uapi/drm/drm.h> + +#include <drm/drm_prime.h> + +struct dma_fence; +struct drm_file; +struct drm_device; +struct device; +struct file; + +/* + * FIXME: Not sure we want to have drm_minor here in the end, but to avoid + * header include loops we need it here for now. + */ + +/* Note that the order of this enum is ABI (it determines + * /dev/dri/renderD* numbers). + */ +enum drm_minor_type { + DRM_MINOR_PRIMARY, + DRM_MINOR_CONTROL, + DRM_MINOR_RENDER, +}; + +/** + * struct drm_minor - DRM device minor structure + * + * This structure represents a DRM minor number for device nodes in /dev. 
+ * Entirely opaque to drivers and should never be inspected directly by drivers. + * Drivers instead should only interact with &struct drm_file and of course + * &struct drm_device, which is also where driver-private data and resources can + * be attached to. + */ +struct drm_minor { + /* private: */ + int index; /* Minor device number */ + int type; /* Control or render */ + struct device *kdev; /* Linux device */ + struct drm_device *dev; + + struct dentry *debugfs_root; + + struct list_head debugfs_list; + struct mutex debugfs_lock; /* Protects debugfs_list. */ +}; + +/** + * struct drm_pending_event - Event queued up for userspace to read + * + * This represents a DRM event. Drivers can use this as a generic completion + * mechanism, which supports kernel-internal &struct completion, &struct dma_fence + * and also the DRM-specific &struct drm_event delivery mechanism. + */ +struct drm_pending_event { + /** + * @completion: + * + * Optional pointer to a kernel internal completion signalled when + * drm_send_event() is called, useful to internally synchronize with + * nonblocking operations. + */ + struct completion *completion; + + /** + * @completion_release: + * + * Optional callback currently only used by the atomic modeset helpers + * to clean up the reference count for the structure @completion is + * stored in. + */ + void (*completion_release)(struct completion *completion); + + /** + * @event: + * + * Pointer to the actual event that should be sent to userspace to be + * read using drm_read(). Can be optional, since nowadays events are + * also used to signal kernel internal threads with @completion or DMA + * transactions using @fence. + */ + struct drm_event *event; + + /** + * @fence: + * + * Optional DMA fence to unblock other hardware transactions which + * depend upon the nonblocking DRM operation this event represents. + */ + struct dma_fence *fence; + + /** + * @file_priv: + * + * &struct drm_file where @event should be delivered to. Only set when + * @event is set. + */ + struct drm_file *file_priv; + + /** + * @link: + * + * Double-linked list to keep track of this event. Can be used by the + * driver up to the point when it calls drm_send_event(), after that + * this list entry is owned by the core for its own book-keeping. + */ + struct list_head link; + + /** + * @pending_link: + * + * Entry on &drm_file.pending_event_list, to keep track of all pending + * events for @file_priv, to allow correct unwinding of them when + * userspace closes the file before the event is delivered. + */ + struct list_head pending_link; +}; + +/** + * struct drm_file - DRM file private data + * + * This structure tracks DRM state per open file descriptor. + */ +struct drm_file { + /** + * @authenticated: + * + * Whether the client is allowed to submit rendering, which for legacy + * nodes means it must be authenticated. + * + * See also the :ref:`section on primary nodes and authentication + * <drm_primary_node>`. + */ + bool authenticated; + + /** + * @stereo_allowed: + * + * True when the client has asked us to expose stereo 3D mode flags. + */ + bool stereo_allowed; + + /** + * @universal_planes: + * + * True if client understands CRTC primary planes and cursor planes + * in the plane list. Automatically set when @atomic is set. + */ + bool universal_planes; + + /** @atomic: True if client understands atomic properties. 
*/ + bool atomic; + + /** + * @aspect_ratio_allowed: + * + * True, if client can handle picture aspect ratios, and has requested + * to pass this information along with the mode. + */ + bool aspect_ratio_allowed; + + /** + * @writeback_connectors: + * + * True if client understands writeback connectors + */ + bool writeback_connectors; + + /** + * @was_master: + * + * This client has or had, master capability. Protected by struct + * &drm_device.master_mutex. + * + * This is used to ensure that CAP_SYS_ADMIN is not enforced, if the + * client is or was master in the past. + */ + bool was_master; + + /** + * @is_master: + * + * This client is the creator of @master. Protected by struct + * &drm_device.master_mutex. + * + * See also the :ref:`section on primary nodes and authentication + * <drm_primary_node>`. + */ + bool is_master; + + /** + * @master: + * + * Master this node is currently associated with. Protected by struct + * &drm_device.master_mutex, and serialized by @master_lookup_lock. + * + * Only relevant if drm_is_primary_client() returns true. Note that + * this only matches &drm_device.master if the master is the currently + * active one. + * + * When dereferencing this pointer, either hold struct + * &drm_device.master_mutex for the duration of the pointer's use, or + * use drm_file_get_master() if struct &drm_device.master_mutex is not + * currently held and there is no other need to hold it. This prevents + * @master from being freed during use. + * + * See also @authentication and @is_master and the :ref:`section on + * primary nodes and authentication <drm_primary_node>`. + */ + struct drm_master *master; + + /** @master_lock: Serializes @master. */ + spinlock_t master_lookup_lock; + + /** @pid: Process that opened this file. */ + struct pid *pid; + + /** @magic: Authentication magic, see @authenticated. */ + drm_magic_t magic; + + /** + * @lhead: + * + * List of all open files of a DRM device, linked into + * &drm_device.filelist. Protected by &drm_device.filelist_mutex. + */ + struct list_head lhead; + + /** @minor: &struct drm_minor for this file. */ + struct drm_minor *minor; + + /** + * @object_idr: + * + * Mapping of mm object handles to object pointers. Used by the GEM + * subsystem. Protected by @table_lock. + */ + struct idr object_idr; + + /** @table_lock: Protects @object_idr. */ + spinlock_t table_lock; + + /** @syncobj_idr: Mapping of sync object handles to object pointers. */ + struct idr syncobj_idr; + /** @syncobj_table_lock: Protects @syncobj_idr. */ + spinlock_t syncobj_table_lock; + + /** @filp: Pointer to the core file structure. */ + struct file *filp; + + /** + * @driver_priv: + * + * Optional pointer for driver private data. Can be allocated in + * &drm_driver.open and should be freed in &drm_driver.postclose. + */ + void *driver_priv; + + /** + * @fbs: + * + * List of &struct drm_framebuffer associated with this file, using the + * &drm_framebuffer.filp_head entry. + * + * Protected by @fbs_lock. Note that the @fbs list holds a reference on + * the framebuffer object to prevent it from untimely disappearing. + */ + struct list_head fbs; + + /** @fbs_lock: Protects @fbs. */ + struct mutex fbs_lock; + + /** + * @blobs: + * + * User-created blob properties; this retains a reference on the + * property. + * + * Protected by @drm_mode_config.blob_lock; + */ + struct list_head blobs; + + /** @event_wait: Waitqueue for new events added to @event_list. 
*/ + wait_queue_head_t event_wait; + + /** + * @pending_event_list: + * + * List of pending &struct drm_pending_event, used to clean up pending + * events in case this file gets closed before the event is signalled. + * Uses the &drm_pending_event.pending_link entry. + * + * Protect by &drm_device.event_lock. + */ + struct list_head pending_event_list; + + /** + * @event_list: + * + * List of &struct drm_pending_event, ready for delivery to userspace + * through drm_read(). Uses the &drm_pending_event.link entry. + * + * Protect by &drm_device.event_lock. + */ + struct list_head event_list; + + /** + * @event_space: + * + * Available event space to prevent userspace from + * exhausting kernel memory. Currently limited to the fairly arbitrary + * value of 4KB. + */ + int event_space; + + /** @event_read_lock: Serializes drm_read(). */ + struct mutex event_read_lock; + + /** + * @prime: + * + * Per-file buffer caches used by the PRIME buffer sharing code. + */ + struct drm_prime_file_private prime; + + /* private: */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) + unsigned long lock_count; /* DRI1 legacy lock count */ +#endif +}; + +/** + * drm_is_primary_client - is this an open file of the primary node + * @file_priv: DRM file + * + * Returns true if this is an open file of the primary node, i.e. + * &drm_file.minor of @file_priv is a primary minor. + * + * See also the :ref:`section on primary nodes and authentication + * <drm_primary_node>`. + */ +static inline bool drm_is_primary_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_PRIMARY; +} + +/** + * drm_is_render_client - is this an open file of the render node + * @file_priv: DRM file + * + * Returns true if this is an open file of the render node, i.e. + * &drm_file.minor of @file_priv is a render minor. + * + * See also the :ref:`section on render nodes <drm_render_node>`. + */ +static inline bool drm_is_render_client(const struct drm_file *file_priv) +{ + return file_priv->minor->type == DRM_MINOR_RENDER; +} + +int drm_open(struct inode *inode, struct file *filp); +ssize_t drm_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset); +int drm_release(struct inode *inode, struct file *filp); +int drm_release_noglobal(struct inode *inode, struct file *filp); +__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait); +int drm_event_reserve_init_locked(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +int drm_event_reserve_init(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_pending_event *p, + struct drm_event *e); +void drm_event_cancel_free(struct drm_device *dev, + struct drm_pending_event *p); +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e); +void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); + +struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); + +#ifdef CONFIG_MMU +struct drm_vma_offset_manager; +unsigned long drm_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags, + struct drm_vma_offset_manager *mgr); +#endif /* CONFIG_MMU */ + + +#endif /* _DRM_FILE_H_ */ diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h new file mode 100644 index 000000000..553210c02 --- /dev/null +++ b/include/drm/drm_fixed.h @@ -0,0 +1,211 @@ +/* + * Copyright 2009 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Christian König + */ +#ifndef DRM_FIXED_H +#define DRM_FIXED_H + +#include <linux/math64.h> + +typedef union dfixed { + u32 full; +} fixed20_12; + + +#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */ +#define dfixed_const_half(A) (u32)(((A) << 12) + 2048) +#define dfixed_const_666(A) (u32)(((A) << 12) + 2731) +#define dfixed_const_8(A) (u32)(((A) << 12) + 3277) +#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12) +#define dfixed_init(A) { .full = dfixed_const((A)) } +#define dfixed_init_half(A) { .full = dfixed_const_half((A)) } +#define dfixed_trunc(A) ((A).full >> 12) +#define dfixed_frac(A) ((A).full & ((1 << 12) - 1)) + +static inline u32 dfixed_floor(fixed20_12 A) +{ + u32 non_frac = dfixed_trunc(A); + + return dfixed_const(non_frac); +} + +static inline u32 dfixed_ceil(fixed20_12 A) +{ + u32 non_frac = dfixed_trunc(A); + + if (A.full > dfixed_const(non_frac)) + return dfixed_const(non_frac + 1); + else + return dfixed_const(non_frac); +} + +static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) +{ + u64 tmp = ((u64)A.full << 13); + + do_div(tmp, B.full); + tmp += 1; + tmp /= 2; + return lower_32_bits(tmp); +} + +#define DRM_FIXED_POINT 32 +#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT) +#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1) +#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK) +#define DRM_FIXED_EPSILON 1LL +#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON) + +static inline s64 drm_int2fixp(int a) +{ + return ((s64)a) << DRM_FIXED_POINT; +} + +static inline int drm_fixp2int(s64 a) +{ + return ((s64)a) >> DRM_FIXED_POINT; +} + +static inline int drm_fixp2int_ceil(s64 a) +{ + if (a > 0) + return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE); + else + return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE); +} + +static inline unsigned drm_fixp_msbset(s64 a) +{ + unsigned shift, sign = (a >> 63) & 1; + + for (shift = 62; shift > 0; --shift) + if (((a >> shift) & 1) != sign) + return shift; + + return 0; +} + +static inline s64 drm_fixp_mul(s64 a, s64 b) +{ + unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b); + s64 result; + + if (shift > 61) { + shift = shift - 61; + a >>= (shift >> 1) + (shift & 1); + b >>= shift >> 1; + } else + shift = 0; + + result = a * b; + + if (shift > DRM_FIXED_POINT) + return result << (shift - DRM_FIXED_POINT); + + if (shift < DRM_FIXED_POINT) + return result >> (DRM_FIXED_POINT - 
shift); + + return result; +} + +static inline s64 drm_fixp_div(s64 a, s64 b) +{ + unsigned shift = 62 - drm_fixp_msbset(a); + s64 result; + + a <<= shift; + + if (shift < DRM_FIXED_POINT) + b >>= (DRM_FIXED_POINT - shift); + + result = div64_s64(a, b); + + if (shift > DRM_FIXED_POINT) + return result >> (shift - DRM_FIXED_POINT); + + return result; +} + +static inline s64 drm_fixp_from_fraction(s64 a, s64 b) +{ + s64 res; + bool a_neg = a < 0; + bool b_neg = b < 0; + u64 a_abs = a_neg ? -a : a; + u64 b_abs = b_neg ? -b : b; + u64 rem; + + /* determine integer part */ + u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem); + + /* determine fractional part */ + { + u32 i = DRM_FIXED_POINT; + + do { + rem <<= 1; + res_abs <<= 1; + if (rem >= b_abs) { + res_abs |= 1; + rem -= b_abs; + } + } while (--i != 0); + } + + /* round up LSB */ + { + u64 summand = (rem << 1) >= b_abs; + + res_abs += summand; + } + + res = (s64) res_abs; + if (a_neg ^ b_neg) + res = -res; + return res; +} + +static inline s64 drm_fixp_exp(s64 x) +{ + s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000); + s64 sum = DRM_FIXED_ONE, term, y = x; + u64 count = 1; + + if (x < 0) + y = -1 * x; + + term = y; + + while (term >= tolerance) { + sum = sum + term; + count = count + 1; + term = drm_fixp_mul(term, div64_s64(y, count)); + } + + if (x < 0) + sum = drm_fixp_div(DRM_FIXED_ONE, sum); + + return sum; +} + +#endif diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h new file mode 100644 index 000000000..21c3d512d --- /dev/null +++ b/include/drm/drm_flip_work.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2013 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_FLIP_WORK_H +#define DRM_FLIP_WORK_H + +#include <linux/kfifo.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +/** + * DOC: flip utils + * + * Util to queue up work to run from work-queue context after flip/vblank. + * Typically this can be used to defer unref of framebuffer's, cursor + * bo's, etc until after vblank. The APIs are all thread-safe. + * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called + * in atomic context. + */ + +struct drm_flip_work; + +/* + * drm_flip_func_t - callback function + * + * @work: the flip work + * @val: value queued via drm_flip_work_queue() + * + * Callback function to be called for each of the queue'd work items after + * drm_flip_work_commit() is called. 
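+ *
+ * A minimal sketch of the intended sequence, assuming a hypothetical driver
+ * that defers framebuffer unreferencing (priv, my_unref_fb and the use of
+ * system_wq are illustrative only)::
+ *
+ *     static void my_unref_fb(struct drm_flip_work *work, void *val)
+ *     {
+ *             drm_framebuffer_put(val);
+ *     }
+ *
+ *     drm_flip_work_init(&priv->unref_work, "unref", my_unref_fb);
+ *
+ * then, possibly from atomic context when the flip is armed::
+ *
+ *     drm_flip_work_queue(&priv->unref_work, fb);
+ *
+ * and finally, once the flip/vblank has actually happened::
+ *
+ *     drm_flip_work_commit(&priv->unref_work, system_wq);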
+ */ +typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); + +/** + * struct drm_flip_task - flip work task + * @node: list entry element + * @data: data to pass to &drm_flip_work.func + */ +struct drm_flip_task { + struct list_head node; + void *data; +}; + +/** + * struct drm_flip_work - flip work queue + * @name: debug name + * @func: callback fxn called for each committed item + * @worker: worker which calls @func + * @queued: queued tasks + * @commited: commited tasks + * @lock: lock to access queued and commited lists + */ +struct drm_flip_work { + const char *name; + drm_flip_func_t func; + struct work_struct worker; + struct list_head queued; + struct list_head commited; + spinlock_t lock; +}; + +struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags); +void drm_flip_work_queue_task(struct drm_flip_work *work, + struct drm_flip_task *task); +void drm_flip_work_queue(struct drm_flip_work *work, void *val); +void drm_flip_work_commit(struct drm_flip_work *work, + struct workqueue_struct *wq); +void drm_flip_work_init(struct drm_flip_work *work, + const char *name, drm_flip_func_t func); +void drm_flip_work_cleanup(struct drm_flip_work *work); + +#endif /* DRM_FLIP_WORK_H */ diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h new file mode 100644 index 000000000..5f9e37032 --- /dev/null +++ b/include/drm/drm_format_helper.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2016 Noralf Trønnes + */ + +#ifndef __LINUX_DRM_FORMAT_HELPER_H +#define __LINUX_DRM_FORMAT_HELPER_H + +struct drm_framebuffer; +struct drm_rect; + +void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_rect *clip); +void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr, + struct drm_framebuffer *fb, + struct drm_rect *clip); +void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, + struct drm_rect *clip, bool cached); +void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, + struct drm_framebuffer *fb, + struct drm_rect *clip, bool swab); +void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch, + void *vaddr, struct drm_framebuffer *fb, + struct drm_rect *clip, bool swab); +void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch, + void *vaddr, struct drm_framebuffer *fb, + struct drm_rect *clip); +void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_rect *clip); + +#endif /* __LINUX_DRM_FORMAT_HELPER_H */ diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h new file mode 100644 index 000000000..156b122c0 --- /dev/null +++ b/include/drm/drm_fourcc.h @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ +#ifndef __DRM_FOURCC_H__ +#define __DRM_FOURCC_H__ + +#include <linux/types.h> +#include <uapi/drm/drm_fourcc.h> + +/* + * DRM formats are little endian. Define host endian variants for the + * most common formats here, to reduce the #ifdefs needed in drivers. + * + * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in + * case the format can't be specified otherwise, so we don't end up + * with two values describing the same format. + */ +#ifdef __BIG_ENDIAN +# define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \ + DRM_FORMAT_BIG_ENDIAN) +# define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \ + DRM_FORMAT_BIG_ENDIAN) +# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888 +# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888 +#else +# define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555 +# define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565 +# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888 +# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888 +#endif + +struct drm_device; +struct drm_mode_fb_cmd2; + +/** + * struct drm_format_info - information about a DRM format + */ +struct drm_format_info { + /** @format: 4CC format identifier (DRM_FORMAT_*) */ + u32 format; + + /** + * @depth: + * + * Color depth (number of bits per pixel excluding padding bits), + * valid for a subset of RGB formats only. This is a legacy field, do + * not use in new code and set to 0 for new formats. + */ + u8 depth; + + /** @num_planes: Number of color planes (1 to 3) */ + u8 num_planes; + + union { + /** + * @cpp: + * + * Number of bytes per pixel (per plane), this is aliased with + * @char_per_block. It is deprecated in favour of using the + * triplet @char_per_block, @block_w, @block_h for better + * describing the pixel format. + */ + u8 cpp[4]; + + /** + * @char_per_block: + * + * Number of bytes per block (per plane), where blocks are + * defined as a rectangle of pixels which are stored next to + * each other in a byte aligned memory region. Together with + * @block_w and @block_h this is used to properly describe tiles + * in tiled formats or to describe groups of pixels in packed + * formats for which the memory needed for a single pixel is not + * byte aligned. + * + * @cpp has been kept for historical reasons because there are + * a lot of places in drivers where it's used. In drm core for + * generic code paths the preferred way is to use + * @char_per_block, drm_format_info_block_width() and + * drm_format_info_block_height() which allows handling both + * block and non-block formats in the same way. + * + * For formats that are intended to be used only with non-linear + * modifiers both @cpp and @char_per_block must be 0 in the + * generic format table. Drivers could supply accurate + * information from their drm_mode_config.get_format_info hook + * if they want the core to be validating the pitch. 
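+	 *
+	 * As a rough sketch of the relation (drm_format_info_min_pitch() is
+	 * the authoritative helper), the minimum pitch of a plane follows
+	 * from the block geometry as::
+	 *
+	 *     DIV_ROUND_UP(buffer_width * char_per_block[plane],
+	 *                  drm_format_info_block_width(info, plane) *
+	 *                  drm_format_info_block_height(info, plane))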
+ */ + u8 char_per_block[4]; + }; + + /** + * @block_w: + * + * Block width in pixels, this is intended to be accessed through + * drm_format_info_block_width() + */ + u8 block_w[4]; + + /** + * @block_h: + * + * Block height in pixels, this is intended to be accessed through + * drm_format_info_block_height() + */ + u8 block_h[4]; + + /** @hsub: Horizontal chroma subsampling factor */ + u8 hsub; + /** @vsub: Vertical chroma subsampling factor */ + u8 vsub; + + /** @has_alpha: Does the format embeds an alpha component? */ + bool has_alpha; + + /** @is_yuv: Is it a YUV format? */ + bool is_yuv; +}; + +/** + * struct drm_format_name_buf - name of a DRM format + * @str: string buffer containing the format name + */ +struct drm_format_name_buf { + char str[32]; +}; + +/** + * drm_format_info_is_yuv_packed - check that the format info matches a YUV + * format with data laid in a single plane + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a packed YUV format. + */ +static inline bool +drm_format_info_is_yuv_packed(const struct drm_format_info *info) +{ + return info->is_yuv && info->num_planes == 1; +} + +/** + * drm_format_info_is_yuv_semiplanar - check that the format info matches a YUV + * format with data laid in two planes (luminance and chrominance) + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a semiplanar YUV format. + */ +static inline bool +drm_format_info_is_yuv_semiplanar(const struct drm_format_info *info) +{ + return info->is_yuv && info->num_planes == 2; +} + +/** + * drm_format_info_is_yuv_planar - check that the format info matches a YUV + * format with data laid in three planes (one for each YUV component) + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a planar YUV format. + */ +static inline bool +drm_format_info_is_yuv_planar(const struct drm_format_info *info) +{ + return info->is_yuv && info->num_planes == 3; +} + +/** + * drm_format_info_is_yuv_sampling_410 - check that the format info matches a + * YUV format with 4:1:0 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:1:0 + * sub-sampling. + */ +static inline bool +drm_format_info_is_yuv_sampling_410(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 4 && info->vsub == 4; +} + +/** + * drm_format_info_is_yuv_sampling_411 - check that the format info matches a + * YUV format with 4:1:1 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:1:1 + * sub-sampling. + */ +static inline bool +drm_format_info_is_yuv_sampling_411(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 4 && info->vsub == 1; +} + +/** + * drm_format_info_is_yuv_sampling_420 - check that the format info matches a + * YUV format with 4:2:0 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:2:0 + * sub-sampling. 
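+ *
+ * DRM_FORMAT_NV12, for instance, is a 4:2:0 format, so a sketch like the
+ * following (with info, height and chroma_height as hypothetical locals)
+ * takes the sub-sampled path::
+ *
+ *     const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+ *
+ *     if (drm_format_info_is_yuv_sampling_420(info))
+ *             chroma_height = DIV_ROUND_UP(height, info->vsub);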
+ */ +static inline bool +drm_format_info_is_yuv_sampling_420(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 2 && info->vsub == 2; +} + +/** + * drm_format_info_is_yuv_sampling_422 - check that the format info matches a + * YUV format with 4:2:2 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:2:2 + * sub-sampling. + */ +static inline bool +drm_format_info_is_yuv_sampling_422(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 2 && info->vsub == 1; +} + +/** + * drm_format_info_is_yuv_sampling_444 - check that the format info matches a + * YUV format with 4:4:4 sub-sampling + * @info: format info + * + * Returns: + * A boolean indicating whether the format info matches a YUV format with 4:4:4 + * sub-sampling. + */ +static inline bool +drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info) +{ + return info->is_yuv && info->hsub == 1 && info->vsub == 1; +} + +/** + * drm_format_info_plane_width - width of the plane given the first plane + * @info: pixel format info + * @width: width of the first plane + * @plane: plane index + * + * Returns: + * The width of @plane, given that the width of the first plane is @width. + */ +static inline +int drm_format_info_plane_width(const struct drm_format_info *info, int width, + int plane) +{ + if (!info || plane >= info->num_planes) + return 0; + + if (plane == 0) + return width; + + return width / info->hsub; +} + +/** + * drm_format_info_plane_height - height of the plane given the first plane + * @info: pixel format info + * @height: height of the first plane + * @plane: plane index + * + * Returns: + * The height of @plane, given that the height of the first plane is @height. 
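+ *
+ * For a 1920x1080 DRM_FORMAT_NV12 buffer, for example, this returns 1080 for
+ * plane 0 and 540 for the chroma plane, since NV12 has a vertical
+ * sub-sampling factor of 2.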
+ */ +static inline +int drm_format_info_plane_height(const struct drm_format_info *info, int height, + int plane) +{ + if (!info || plane >= info->num_planes) + return 0; + + if (plane == 0) + return height; + + return height / info->vsub; +} + +const struct drm_format_info *__drm_format_info(u32 format); +const struct drm_format_info *drm_format_info(u32 format); +const struct drm_format_info * +drm_get_format_info(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd); +uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); +uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, + uint32_t bpp, uint32_t depth); +unsigned int drm_format_info_block_width(const struct drm_format_info *info, + int plane); +unsigned int drm_format_info_block_height(const struct drm_format_info *info, + int plane); +uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, + int plane, unsigned int buffer_width); +const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf); + +#endif /* __DRM_FOURCC_H__ */ diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h new file mode 100644 index 000000000..be658ebbe --- /dev/null +++ b/include/drm/drm_framebuffer.h @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_FRAMEBUFFER_H__ +#define __DRM_FRAMEBUFFER_H__ + +#include <linux/ctype.h> +#include <linux/list.h> +#include <linux/sched.h> + +#include <drm/drm_mode_object.h> + +struct drm_clip_rect; +struct drm_device; +struct drm_file; +struct drm_format_info; +struct drm_framebuffer; +struct drm_gem_object; + +/** + * struct drm_framebuffer_funcs - framebuffer hooks + */ +struct drm_framebuffer_funcs { + /** + * @destroy: + * + * Clean up framebuffer resources, specifically also unreference the + * backing storage. The core guarantees to call this function for every + * framebuffer successfully created by calling + * &drm_mode_config_funcs.fb_create. Drivers must also call + * drm_framebuffer_cleanup() to release DRM core resources for this + * framebuffer. + */ + void (*destroy)(struct drm_framebuffer *framebuffer); + + /** + * @create_handle: + * + * Create a buffer handle in the driver-specific buffer manager (either + * GEM or TTM) valid for the passed-in &struct drm_file. 
This is used by + * the core to implement the GETFB IOCTL, which returns (for + * sufficiently priviledged user) also a native buffer handle. This can + * be used for seamless transitions between modesetting clients by + * copying the current screen contents to a private buffer and blending + * between that and the new contents. + * + * GEM based drivers should call drm_gem_handle_create() to create the + * handle. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*create_handle)(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle); + /** + * @dirty: + * + * Optional callback for the dirty fb IOCTL. + * + * Userspace can notify the driver via this callback that an area of the + * framebuffer has changed and should be flushed to the display + * hardware. This can also be used internally, e.g. by the fbdev + * emulation, though that's not the case currently. + * + * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd + * for more information as all the semantics and arguments have a one to + * one mapping on this function. + * + * Atomic drivers should use drm_atomic_helper_dirtyfb() to implement + * this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*dirty)(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, + unsigned color, struct drm_clip_rect *clips, + unsigned num_clips); +}; + +/** + * struct drm_framebuffer - frame buffer object + * + * Note that the fb is refcounted for the benefit of driver internals, + * for example some hw, disabling a CRTC/plane is asynchronous, and + * scanout does not actually complete until the next vblank. So some + * cleanup (like releasing the reference(s) on the backing GEM bo(s)) + * should be deferred. In cases like this, the driver would like to + * hold a ref to the fb even though it has already been removed from + * userspace perspective. See drm_framebuffer_get() and + * drm_framebuffer_put(). + * + * The refcount is stored inside the mode object @base. + */ +struct drm_framebuffer { + /** + * @dev: DRM device this framebuffer belongs to + */ + struct drm_device *dev; + /** + * @head: Place on the &drm_mode_config.fb_list, access protected by + * &drm_mode_config.fb_lock. + */ + struct list_head head; + + /** + * @base: base modeset object structure, contains the reference count. + */ + struct drm_mode_object base; + + /** + * @comm: Name of the process allocating the fb, used for fb dumping. + */ + char comm[TASK_COMM_LEN]; + + /** + * @format: framebuffer format information + */ + const struct drm_format_info *format; + /** + * @funcs: framebuffer vfunc table + */ + const struct drm_framebuffer_funcs *funcs; + /** + * @pitches: Line stride per buffer. For userspace created object this + * is copied from drm_mode_fb_cmd2. + */ + unsigned int pitches[4]; + /** + * @offsets: Offset from buffer start to the actual pixel data in bytes, + * per buffer. For userspace created object this is copied from + * drm_mode_fb_cmd2. + * + * Note that this is a linear offset and does not take into account + * tiling or buffer laytou per @modifier. It meant to be used when the + * actual pixel data for this framebuffer plane starts at an offset, + * e.g. when multiple planes are allocated within the same backing + * storage buffer object. For tiled layouts this generally means it + * @offsets must at least be tile-size aligned, but hardware often has + * stricter requirements. 
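+	 *
+	 * As an illustrative sketch, a tightly packed linear NV12 buffer
+	 * carved out of a single backing object could use::
+	 *
+	 *     offsets[0] = 0;
+	 *     offsets[1] = pitches[0] * height;
+	 *
+	 * with the chroma plane simply starting right after the luma plane.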
+ * + * This should not be used to specifiy x/y pixel offsets into the buffer + * data (even for linear buffers). Specifying an x/y pixel offset is + * instead done through the source rectangle in &struct drm_plane_state. + */ + unsigned int offsets[4]; + /** + * @modifier: Data layout modifier. This is used to describe + * tiling, or also special layouts (like compression) of auxiliary + * buffers. For userspace created object this is copied from + * drm_mode_fb_cmd2. + */ + uint64_t modifier; + /** + * @width: Logical width of the visible area of the framebuffer, in + * pixels. + */ + unsigned int width; + /** + * @height: Logical height of the visible area of the framebuffer, in + * pixels. + */ + unsigned int height; + /** + * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or + * DRM_MODE_FB_MODIFIERS. + */ + int flags; + /** + * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor + * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR + * universal plane. + */ + int hot_x; + /** + * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor + * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR + * universal plane. + */ + int hot_y; + /** + * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. + */ + struct list_head filp_head; + /** + * @obj: GEM objects backing the framebuffer, one per plane (optional). + * + * This is used by the GEM framebuffer helpers, see e.g. + * drm_gem_fb_create(). + */ + struct drm_gem_object *obj[4]; +}; + +#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) + +int drm_framebuffer_init(struct drm_device *dev, + struct drm_framebuffer *fb, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id); +void drm_framebuffer_remove(struct drm_framebuffer *fb); +void drm_framebuffer_cleanup(struct drm_framebuffer *fb); +void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); + +/** + * drm_framebuffer_get - acquire a framebuffer reference + * @fb: DRM framebuffer + * + * This function increments the framebuffer's reference count. + */ +static inline void drm_framebuffer_get(struct drm_framebuffer *fb) +{ + drm_mode_object_get(&fb->base); +} + +/** + * drm_framebuffer_put - release a framebuffer reference + * @fb: DRM framebuffer + * + * This function decrements the framebuffer's reference count and frees the + * framebuffer if the reference count drops to zero. + */ +static inline void drm_framebuffer_put(struct drm_framebuffer *fb) +{ + drm_mode_object_put(&fb->base); +} + +/** + * drm_framebuffer_read_refcount - read the framebuffer reference count. + * @fb: framebuffer + * + * This functions returns the framebuffer's reference count. + */ +static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb) +{ + return kref_read(&fb->base.refcount); +} + +/** + * drm_framebuffer_assign - store a reference to the fb + * @p: location to store framebuffer + * @fb: new framebuffer (maybe NULL) + * + * This functions sets the location to store a reference to the framebuffer, + * unreferencing the framebuffer that was previously stored in that location. 
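+ *
+ * A short sketch of typical use, with priv->cached_fb as a hypothetical
+ * driver-private pointer::
+ *
+ *     drm_framebuffer_assign(&priv->cached_fb, new_fb);
+ *
+ * and later, to drop the stored reference again::
+ *
+ *     drm_framebuffer_assign(&priv->cached_fb, NULL);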
+ */ +static inline void drm_framebuffer_assign(struct drm_framebuffer **p, + struct drm_framebuffer *fb) +{ + if (fb) + drm_framebuffer_get(fb); + if (*p) + drm_framebuffer_put(*p); + *p = fb; +} + +/* + * drm_for_each_fb - iterate over all framebuffers + * @fb: the loop cursor + * @dev: the DRM device + * + * Iterate over all framebuffers of @dev. User must hold + * &drm_mode_config.fb_lock. + */ +#define drm_for_each_fb(fb, dev) \ + for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \ + fb = list_first_entry(&(dev)->mode_config.fb_list, \ + struct drm_framebuffer, head); \ + &fb->head != (&(dev)->mode_config.fb_list); \ + fb = list_next_entry(fb, head)) + +int drm_framebuffer_plane_width(int width, + const struct drm_framebuffer *fb, int plane); +int drm_framebuffer_plane_height(int height, + const struct drm_framebuffer *fb, int plane); + +/** + * struct drm_afbc_framebuffer - a special afbc frame buffer object + * + * A derived class of struct drm_framebuffer, dedicated for afbc use cases. + */ +struct drm_afbc_framebuffer { + /** + * @base: base framebuffer structure. + */ + struct drm_framebuffer base; + /** + * @block_width: width of a single afbc block + */ + u32 block_width; + /** + * @block_height: height of a single afbc block + */ + u32 block_height; + /** + * @aligned_width: aligned frame buffer width + */ + u32 aligned_width; + /** + * @aligned_height: aligned frame buffer height + */ + u32 aligned_height; + /** + * @offset: offset of the first afbc header + */ + u32 offset; + /** + * @afbc_size: minimum size of afbc buffer + */ + u32 afbc_size; +}; + +#define fb_to_afbc_fb(x) container_of(x, struct drm_afbc_framebuffer, base) + +#endif diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h new file mode 100644 index 000000000..337a48321 --- /dev/null +++ b/include/drm/drm_gem.h @@ -0,0 +1,422 @@ +#ifndef __DRM_GEM_H__ +#define __DRM_GEM_H__ + +/* + * GEM Graphics Execution Manager Driver Interfaces + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * Copyright © 2014 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include <linux/kref.h> +#include <linux/dma-resv.h> + +#include <drm/drm_vma_manager.h> + +struct drm_gem_object; + +/** + * struct drm_gem_object_funcs - GEM object functions + */ +struct drm_gem_object_funcs { + /** + * @free: + * + * Deconstructor for drm_gem_objects. + * + * This callback is mandatory. + */ + void (*free)(struct drm_gem_object *obj); + + /** + * @open: + * + * Called upon GEM handle creation. + * + * This callback is optional. + */ + int (*open)(struct drm_gem_object *obj, struct drm_file *file); + + /** + * @close: + * + * Called upon GEM handle release. + * + * This callback is optional. + */ + void (*close)(struct drm_gem_object *obj, struct drm_file *file); + + /** + * @print_info: + * + * If driver subclasses struct &drm_gem_object, it can implement this + * optional hook for printing additional driver specific info. + * + * drm_printf_indent() should be used in the callback passing it the + * indent argument. + * + * This callback is called from drm_gem_print_info(). + * + * This callback is optional. + */ + void (*print_info)(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + + /** + * @export: + * + * Export backing buffer as a &dma_buf. + * If this is not set drm_gem_prime_export() is used. + * + * This callback is optional. + */ + struct dma_buf *(*export)(struct drm_gem_object *obj, int flags); + + /** + * @pin: + * + * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper. + * + * This callback is optional. + */ + int (*pin)(struct drm_gem_object *obj); + + /** + * @unpin: + * + * Unpin backing buffer. Used by the drm_gem_map_detach() helper. + * + * This callback is optional. + */ + void (*unpin)(struct drm_gem_object *obj); + + /** + * @get_sg_table: + * + * Returns a Scatter-Gather table representation of the buffer. + * Used when exporting a buffer by the drm_gem_map_dma_buf() helper. + * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table() + * in drm_gem_unmap_buf(), therefore these helpers and this callback + * here cannot be used for sg tables pointing at driver private memory + * ranges. + * + * See also drm_prime_pages_to_sg(). + */ + struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); + + /** + * @vmap: + * + * Returns a virtual address for the buffer. Used by the + * drm_gem_dmabuf_vmap() helper. + * + * This callback is optional. + */ + void *(*vmap)(struct drm_gem_object *obj); + + /** + * @vunmap: + * + * Releases the address previously returned by @vmap. Used by the + * drm_gem_dmabuf_vunmap() helper. + * + * This callback is optional. + */ + void (*vunmap)(struct drm_gem_object *obj, void *vaddr); + + /** + * @mmap: + * + * Handle mmap() of the gem object, setup vma accordingly. + * + * This callback is optional. + * + * The callback is used by both drm_gem_mmap_obj() and + * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not + * used, the @mmap callback must set vma->vm_ops instead. + */ + int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); + + /** + * @vm_ops: + * + * Virtual memory operations used with mmap. + * + * This is optional but necessary for mmap support. + */ + const struct vm_operations_struct *vm_ops; +}; + +/** + * struct drm_gem_object - GEM buffer object + * + * This structure defines the generic parts for GEM buffer objects, which are + * mostly around handling mmap and userspace handles. + * + * Buffer objects are often abbreviated to BO. 
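+ *
+ * Drivers typically embed it in a larger, driver-specific buffer object
+ * structure, roughly like this hypothetical example::
+ *
+ *     struct my_bo {
+ *             struct drm_gem_object base;
+ *             struct sg_table *pages;
+ *     };
+ *
+ *     #define to_my_bo(gobj) container_of(gobj, struct my_bo, base)
+ *
+ * which is the same pattern used by helpers such as
+ * &struct drm_gem_cma_object and &struct drm_gem_shmem_object.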
+ */ +struct drm_gem_object { + /** + * @refcount: + * + * Reference count of this object + * + * Please use drm_gem_object_get() to acquire and drm_gem_object_put_locked() + * or drm_gem_object_put() to release a reference to a GEM + * buffer object. + */ + struct kref refcount; + + /** + * @handle_count: + * + * This is the GEM file_priv handle count of this object. + * + * Each handle also holds a reference. Note that when the handle_count + * drops to 0 any global names (e.g. the id in the flink namespace) will + * be cleared. + * + * Protected by &drm_device.object_name_lock. + */ + unsigned handle_count; + + /** + * @dev: DRM dev this object belongs to. + */ + struct drm_device *dev; + + /** + * @filp: + * + * SHMEM file node used as backing storage for swappable buffer objects. + * GEM also supports driver private objects with driver-specific backing + * storage (contiguous CMA memory, special reserved blocks). In this + * case @filp is NULL. + */ + struct file *filp; + + /** + * @vma_node: + * + * Mapping info for this object to support mmap. Drivers are supposed to + * allocate the mmap offset using drm_gem_create_mmap_offset(). The + * offset itself can be retrieved using drm_vma_node_offset_addr(). + * + * Memory mapping itself is handled by drm_gem_mmap(), which also checks + * that userspace is allowed to access the object. + */ + struct drm_vma_offset_node vma_node; + + /** + * @size: + * + * Size of the object, in bytes. Immutable over the object's + * lifetime. + */ + size_t size; + + /** + * @name: + * + * Global name for this object, starts at 1. 0 means unnamed. + * Access is covered by &drm_device.object_name_lock. This is used by + * the GEM_FLINK and GEM_OPEN ioctls. + */ + int name; + + /** + * @dma_buf: + * + * dma-buf associated with this GEM object. + * + * Pointer to the dma-buf associated with this gem object (either + * through importing or exporting). We break the resulting reference + * loop when the last gem handle for this object is released. + * + * Protected by &drm_device.object_name_lock. + */ + struct dma_buf *dma_buf; + + /** + * @import_attach: + * + * dma-buf attachment backing this object. + * + * Any foreign dma_buf imported as a gem object has this set to the + * attachment point for the device. This is invariant over the lifetime + * of a gem object. + * + * The &drm_driver.gem_free_object_unlocked callback is responsible for + * cleaning up the dma_buf attachment and references acquired at import + * time. + * + * Note that the drm gem/prime core does not depend upon drivers setting + * this field any more. So for drivers where this doesn't make sense + * (e.g. virtual devices or a displaylink behind an usb bus) they can + * simply leave it as NULL. + */ + struct dma_buf_attachment *import_attach; + + /** + * @resv: + * + * Pointer to reservation object associated with the this GEM object. + * + * Normally (@resv == &@_resv) except for imported GEM objects. + */ + struct dma_resv *resv; + + /** + * @_resv: + * + * A reservation object for this GEM object. + * + * This is unused for imported GEM objects. + */ + struct dma_resv _resv; + + /** + * @funcs: + * + * Optional GEM object functions. If this is set, it will be used instead of the + * corresponding &drm_driver GEM callbacks. + * + * New drivers should use this. 
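+	 *
+	 * A minimal sketch of providing such a table (all my_* names are
+	 * hypothetical)::
+	 *
+	 *     static const struct drm_gem_object_funcs my_gem_funcs = {
+	 *             .free = my_gem_free,
+	 *             .vmap = my_gem_vmap,
+	 *             .vunmap = my_gem_vunmap,
+	 *             .vm_ops = &my_gem_vm_ops,
+	 *     };
+	 *
+	 * with @funcs pointed at it when the object is created.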
+ * + */ + const struct drm_gem_object_funcs *funcs; +}; + +/** + * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers + * @name: name for the generated structure + * + * This macro autogenerates a suitable &struct file_operations for GEM based + * drivers, which can be assigned to &drm_driver.fops. Note that this structure + * cannot be shared between drivers, because it contains a reference to the + * current module using THIS_MODULE. + * + * Note that the declaration is already marked as static - if you need a + * non-static version of this you're probably doing it wrong and will break the + * THIS_MODULE reference by accident. + */ +#define DEFINE_DRM_GEM_FOPS(name) \ + static const struct file_operations name = {\ + .owner = THIS_MODULE,\ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_mmap,\ + } + +void drm_gem_object_release(struct drm_gem_object *obj); +void drm_gem_object_free(struct kref *kref); +int drm_gem_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size); +void drm_gem_private_object_init(struct drm_device *dev, + struct drm_gem_object *obj, size_t size); +void drm_gem_vm_open(struct vm_area_struct *vma); +void drm_gem_vm_close(struct vm_area_struct *vma); +int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, + struct vm_area_struct *vma); +int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); + +/** + * drm_gem_object_get - acquire a GEM buffer object reference + * @obj: GEM buffer object + * + * This function acquires an additional reference to @obj. It is illegal to + * call this without already holding a reference. No locks required. + */ +static inline void drm_gem_object_get(struct drm_gem_object *obj) +{ + kref_get(&obj->refcount); +} + +__attribute__((nonnull)) +static inline void +__drm_gem_object_put(struct drm_gem_object *obj) +{ + kref_put(&obj->refcount, drm_gem_object_free); +} + +/** + * drm_gem_object_put - drop a GEM buffer object reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. 
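+ *
+ * A common pattern is pairing it with a handle lookup, e.g. (sketch, with
+ * my_do_something() hypothetical)::
+ *
+ *     obj = drm_gem_object_lookup(file_priv, args->handle);
+ *     if (!obj)
+ *             return -ENOENT;
+ *
+ *     ret = my_do_something(obj);
+ *
+ *     drm_gem_object_put(obj);
+ *     return ret;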
+ */ +static inline void +drm_gem_object_put(struct drm_gem_object *obj) +{ + if (obj) + __drm_gem_object_put(obj); +} + +void drm_gem_object_put_locked(struct drm_gem_object *obj); + +int drm_gem_handle_create(struct drm_file *file_priv, + struct drm_gem_object *obj, + u32 *handlep); +int drm_gem_handle_delete(struct drm_file *filp, u32 handle); + + +void drm_gem_free_mmap_offset(struct drm_gem_object *obj); +int drm_gem_create_mmap_offset(struct drm_gem_object *obj); +int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); + +struct page **drm_gem_get_pages(struct drm_gem_object *obj); +void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, + bool dirty, bool accessed); + +int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, + int count, struct drm_gem_object ***objs_out); +struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); +long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, + bool wait_all, unsigned long timeout); +int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx); +void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx); +int drm_gem_fence_array_add(struct xarray *fence_array, + struct dma_fence *fence); +int drm_gem_fence_array_add_implicit(struct xarray *fence_array, + struct drm_gem_object *obj, + bool write); +int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + u32 handle, u64 *offset); +int drm_gem_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle); + +#endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h new file mode 100644 index 000000000..2bfa25026 --- /dev/null +++ b/include/drm/drm_gem_cma_helper.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_GEM_CMA_HELPER_H__ +#define __DRM_GEM_CMA_HELPER_H__ + +#include <drm/drm_file.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_gem.h> + +struct drm_mode_create_dumb; + +/** + * struct drm_gem_cma_object - GEM object backed by CMA memory allocations + * @base: base GEM object + * @paddr: physical address of the backing memory + * @sgt: scatter/gather table for imported PRIME buffers. The table can have + * more than one entry but they are guaranteed to have contiguous + * DMA addresses. + * @vaddr: kernel virtual address of the backing memory + */ +struct drm_gem_cma_object { + struct drm_gem_object base; + dma_addr_t paddr; + struct sg_table *sgt; + + /* For objects with DMA memory allocated by GEM CMA */ + void *vaddr; +}; + +#define to_drm_gem_cma_obj(gem_obj) \ + container_of(gem_obj, struct drm_gem_cma_object, base) + +#ifndef CONFIG_MMU +#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ + .get_unmapped_area = drm_gem_cma_get_unmapped_area, +#else +#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS +#endif + +/** + * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers + * @name: name for the generated structure + * + * This macro autogenerates a suitable &struct file_operations for CMA based + * drivers, which can be assigned to &drm_driver.fops. Note that this structure + * cannot be shared between drivers, because it contains a reference to the + * current module using THIS_MODULE. + * + * Note that the declaration is already marked as static - if you need a + * non-static version of this you're probably doing it wrong and will break the + * THIS_MODULE reference by accident. 
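+ *
+ * Typical usage looks roughly like this (my_* names are hypothetical)::
+ *
+ *     DEFINE_DRM_GEM_CMA_FOPS(my_fops);
+ *
+ *     static struct drm_driver my_driver = {
+ *             .driver_features = DRIVER_GEM | DRIVER_MODESET,
+ *             .fops = &my_fops,
+ *             DRM_GEM_CMA_DRIVER_OPS,
+ *     };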
+ */ +#define DEFINE_DRM_GEM_CMA_FOPS(name) \ + static const struct file_operations name = {\ + .owner = THIS_MODULE,\ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_cma_mmap,\ + DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ + } + +/* free GEM object */ +void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); + +/* create memory region for DRM framebuffer */ +int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args); + +/* create memory region for DRM framebuffer */ +int drm_gem_cma_dumb_create(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args); + +/* set vm_flags and we can change the VM attribute to other one at here */ +int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); + +/* allocate physical memory */ +struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, + size_t size); + +extern const struct vm_operations_struct drm_gem_cma_vm_ops; + +#ifndef CONFIG_MMU +unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#endif + +void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + +struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object * +drm_gem_cma_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); +int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma); +void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); +void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + +struct drm_gem_object * +drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); + +/** + * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations + * @dumb_create_func: callback function for .dumb_create + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure. + * + * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS for drivers that + * override the default implementation of &struct rm_driver.dumb_create. Use + * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address + * on imported buffers should use + * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. + */ +#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \ + .gem_create_object = drm_gem_cma_create_object_default_funcs, \ + .dumb_create = (dumb_create_func), \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \ + .gem_prime_mmap = drm_gem_cma_prime_mmap + +/** + * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure. + * + * Drivers that come with their own implementation of + * &struct drm_driver.dumb_create should use + * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use + * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address + * on imported buffers should use DRM_GEM_CMA_DRIVER_OPS_VMAP instead. 
+ */ +#define DRM_GEM_CMA_DRIVER_OPS \ + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_cma_dumb_create) + +/** + * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations + * ensuring a virtual address + * on the buffer + * @dumb_create_func: callback function for .dumb_create + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure for drivers that need the virtual address also on + * imported buffers. + * + * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that + * override the default implementation of &struct drm_driver.dumb_create. Use + * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a + * virtual address on imported buffers should use + * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. + */ +#define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ + .gem_create_object = drm_gem_cma_create_object_default_funcs, \ + .dumb_create = dumb_create_func, \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \ + .gem_prime_mmap = drm_gem_prime_mmap + +/** + * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual + * address on the buffer + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure for drivers that need the virtual address also on + * imported buffers. + * + * Drivers that come with their own implementation of + * &struct drm_driver.dumb_create should use + * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use + * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a + * virtual address on imported buffers should use DRM_GEM_CMA_DRIVER_OPS + * instead. 
+ */ +#define DRM_GEM_CMA_DRIVER_OPS_VMAP \ + DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create) + +struct drm_gem_object * +drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +#endif /* __DRM_GEM_CMA_HELPER_H__ */ diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h new file mode 100644 index 000000000..6b0131549 --- /dev/null +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -0,0 +1,51 @@ +#ifndef __DRM_GEM_FB_HELPER_H__ +#define __DRM_GEM_FB_HELPER_H__ + +struct drm_afbc_framebuffer; +struct drm_device; +struct drm_fb_helper_surface_size; +struct drm_file; +struct drm_framebuffer; +struct drm_framebuffer_funcs; +struct drm_gem_object; +struct drm_mode_fb_cmd2; +struct drm_plane; +struct drm_plane_state; +struct drm_simple_display_pipe; + +#define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52) + +struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb, + unsigned int plane); +void drm_gem_fb_destroy(struct drm_framebuffer *fb); +int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file, + unsigned int *handle); + +int drm_gem_fb_init_with_funcs(struct drm_device *dev, + struct drm_framebuffer *fb, + struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer * +drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd, + const struct drm_framebuffer_funcs *funcs); +struct drm_framebuffer * +drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); +struct drm_framebuffer * +drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); + +#define drm_is_afbc(modifier) \ + (((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0)) + +int drm_gem_fb_afbc_init(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_afbc_framebuffer *afbc_fb); + +int drm_gem_fb_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state); +int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +#endif diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h new file mode 100644 index 000000000..5381f0c8c --- /dev/null +++ b/include/drm/drm_gem_shmem_helper.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __DRM_GEM_SHMEM_HELPER_H__ +#define __DRM_GEM_SHMEM_HELPER_H__ + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/mutex.h> + +#include <drm/drm_file.h> +#include <drm/drm_gem.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_prime.h> + +struct dma_buf_attachment; +struct drm_mode_create_dumb; +struct drm_printer; +struct sg_table; + +/** + * struct drm_gem_shmem_object - GEM object backed by shmem + */ +struct drm_gem_shmem_object { + /** + * @base: Base GEM object + */ + struct drm_gem_object base; + + /** + * @pages_lock: Protects the page table and use count + */ + struct mutex pages_lock; + + /** + * @pages: Page table + */ + struct page **pages; + + /** + * @pages_use_count: + * + * Reference count on the pages table. + * The pages are put when the count reaches zero. + */ + unsigned int pages_use_count; + + /** + * @madv: State for madvise + * + * 0 is active/inuse. + * A negative value is the object is purged. 
+ * Positive values are driver specific and not used by the helpers. + */ + int madv; + + /** + * @madv_list: List entry for madvise tracking + * + * Typically used by drivers to track purgeable objects + */ + struct list_head madv_list; + + /** + * @pages_mark_dirty_on_put: + * + * Mark pages as dirty when they are put. + */ + unsigned int pages_mark_dirty_on_put : 1; + + /** + * @pages_mark_accessed_on_put: + * + * Mark pages as accessed when they are put. + */ + unsigned int pages_mark_accessed_on_put : 1; + + /** + * @sgt: Scatter/gather table for imported PRIME buffers + */ + struct sg_table *sgt; + + /** + * @vmap_lock: Protects the vmap address and use count + */ + struct mutex vmap_lock; + + /** + * @vaddr: Kernel virtual address of the backing memory + */ + void *vaddr; + + /** + * @vmap_use_count: + * + * Reference count on the virtual address. + * The address are un-mapped when the count reaches zero. + */ + unsigned int vmap_use_count; + + /** + * @map_cached: map object cached (instead of using writecombine). + */ + bool map_cached; +}; + +#define to_drm_gem_shmem_obj(obj) \ + container_of(obj, struct drm_gem_shmem_object, base) + +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); +void drm_gem_shmem_free_object(struct drm_gem_object *obj); + +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); +int drm_gem_shmem_pin(struct drm_gem_object *obj); +void drm_gem_shmem_unpin(struct drm_gem_object *obj); +void *drm_gem_shmem_vmap(struct drm_gem_object *obj); +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr); + +int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv); + +static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) +{ + return (shmem->madv > 0) && + !shmem->vmap_use_count && shmem->sgt && + !shmem->base.dma_buf && !shmem->base.import_attach; +} + +void drm_gem_shmem_purge_locked(struct drm_gem_object *obj); +bool drm_gem_shmem_purge(struct drm_gem_object *obj); + +struct drm_gem_shmem_object * +drm_gem_shmem_create_with_handle(struct drm_file *file_priv, + struct drm_device *dev, size_t size, + uint32_t *handle); + +struct drm_gem_object * +drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size); + +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); + +int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); + +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object * +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj); + +/** + * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations + * + * This macro provides a shortcut for setting the shmem GEM operations in + * the &drm_driver structure. 
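+ *
+ * A hypothetical driver would typically place the macro directly in its
+ * &drm_driver definition (my_driver is an illustrative name):
+ *
+ *	static struct drm_driver my_driver = {
+ *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ *		DRM_GEM_SHMEM_DRIVER_OPS,
+ *	};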
+ */ +#define DRM_GEM_SHMEM_DRIVER_OPS \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \ + .gem_prime_mmap = drm_gem_prime_mmap, \ + .dumb_create = drm_gem_shmem_dumb_create + +#endif /* __DRM_GEM_SHMEM_HELPER_H__ */ diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h new file mode 100644 index 000000000..118cef76f --- /dev/null +++ b/include/drm/drm_gem_ttm_helper.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef DRM_GEM_TTM_HELPER_H +#define DRM_GEM_TTM_HELPER_H + +#include <linux/kernel.h> + +#include <drm/drm_gem.h> +#include <drm/drm_device.h> +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_bo_driver.h> + +#define drm_gem_ttm_of_gem(gem_obj) \ + container_of(gem_obj, struct ttm_buffer_object, base) + +void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *gem); +int drm_gem_ttm_mmap(struct drm_gem_object *gem, + struct vm_area_struct *vma); + +#endif diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h new file mode 100644 index 000000000..62cc6e6c3 --- /dev/null +++ b/include/drm/drm_gem_vram_helper.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef DRM_GEM_VRAM_HELPER_H +#define DRM_GEM_VRAM_HELPER_H + +#include <drm/drm_file.h> +#include <drm/drm_gem.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_modes.h> +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_bo_driver.h> + +#include <linux/kernel.h> /* for container_of() */ + +struct drm_mode_create_dumb; +struct drm_plane; +struct drm_plane_state; +struct drm_simple_display_pipe; +struct filp; +struct vm_area_struct; + +#define DRM_GEM_VRAM_PL_FLAG_SYSTEM (1 << 0) +#define DRM_GEM_VRAM_PL_FLAG_VRAM (1 << 1) +#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN (1 << 2) + +/* + * Buffer-object helpers + */ + +/** + * struct drm_gem_vram_object - GEM object backed by VRAM + * @gem: GEM object + * @bo: TTM buffer object + * @kmap: Mapping information for @bo + * @placement: TTM placement information. Supported placements are \ + %TTM_PL_VRAM and %TTM_PL_SYSTEM + * @placements: TTM placement information. + * @pin_count: Pin counter + * + * The type struct drm_gem_vram_object represents a GEM object that is + * backed by VRAM. It can be used for simple framebuffer devices with + * dedicated memory. The buffer object can be evicted to system memory if + * video memory becomes scarce. + * + * GEM VRAM objects perform reference counting for pin and mapping + * operations. So a buffer object that has been pinned N times with + * drm_gem_vram_pin() must be unpinned N times with + * drm_gem_vram_unpin(). The same applies to pairs of + * drm_gem_vram_kmap() and drm_gem_vram_kunmap(), as well as pairs of + * drm_gem_vram_vmap() and drm_gem_vram_vunmap(). + */ +struct drm_gem_vram_object { + struct ttm_buffer_object bo; + struct ttm_bo_kmap_obj kmap; + + /** + * @kmap_use_count: + * + * Reference count on the virtual address. + * The address are un-mapped when the count reaches zero. + */ + unsigned int kmap_use_count; + + /* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */ + struct ttm_placement placement; + struct ttm_place placements[2]; + + int pin_count; +}; + +/** + * Returns the container of type &struct drm_gem_vram_object + * for field bo. 
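+ * This is usually needed in TTM callbacks, which only receive the embedded
+ * &struct ttm_buffer_object; a sketch (my_evict_flags is a hypothetical
+ * callback name):
+ *
+ *	static void my_evict_flags(struct ttm_buffer_object *bo,
+ *				   struct ttm_placement *pl)
+ *	{
+ *		struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
+ *		...
+ *	}
+ *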
+ * @bo: the VRAM buffer object + * Returns: The containing GEM VRAM object + */ +static inline struct drm_gem_vram_object *drm_gem_vram_of_bo( + struct ttm_buffer_object *bo) +{ + return container_of(bo, struct drm_gem_vram_object, bo); +} + +/** + * Returns the container of type &struct drm_gem_vram_object + * for field gem. + * @gem: the GEM object + * Returns: The containing GEM VRAM object + */ +static inline struct drm_gem_vram_object *drm_gem_vram_of_gem( + struct drm_gem_object *gem) +{ + return container_of(gem, struct drm_gem_vram_object, bo.base); +} + +struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, + size_t size, + unsigned long pg_align); +void drm_gem_vram_put(struct drm_gem_vram_object *gbo); +u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo); +s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); +int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); +int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); +void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo); +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr); + +int drm_gem_vram_fill_create_dumb(struct drm_file *file, + struct drm_device *dev, + unsigned long pg_align, + unsigned long pitch_align, + struct drm_mode_create_dumb *args); + +/* + * Helpers for struct drm_driver + */ + +int drm_gem_vram_driver_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + uint32_t handle, uint64_t *offset); + +/* + * Helpers for struct drm_plane_helper_funcs + */ +int +drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state); +void +drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state); + +/* + * Helpers for struct drm_simple_display_pipe_funcs + */ + +int drm_gem_vram_simple_display_pipe_prepare_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *new_state); + +void drm_gem_vram_simple_display_pipe_cleanup_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state); + +/** + * define DRM_GEM_VRAM_DRIVER - default callback functions for \ + &struct drm_driver + * + * Drivers that use VRAM MM and GEM VRAM can use this macro to initialize + * &struct drm_driver with default functions. + */ +#define DRM_GEM_VRAM_DRIVER \ + .debugfs_init = drm_vram_mm_debugfs_init, \ + .dumb_create = drm_gem_vram_driver_dumb_create, \ + .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \ + .gem_prime_mmap = drm_gem_prime_mmap + +/* + * VRAM memory manager + */ + +/** + * struct drm_vram_mm - An instance of VRAM MM + * @vram_base: Base address of the managed video memory + * @vram_size: Size of the managed video memory in bytes + * @bdev: The TTM BO device. + * @funcs: TTM BO functions + * + * The fields &struct drm_vram_mm.vram_base and + * &struct drm_vram_mm.vrm_size are managed by VRAM MM, but are + * available for public read access. Use the field + * &struct drm_vram_mm.bdev to access the TTM BO device. + */ +struct drm_vram_mm { + uint64_t vram_base; + size_t vram_size; + + struct ttm_bo_device bdev; +}; + +/** + * drm_vram_mm_of_bdev() - \ + Returns the container of type &struct ttm_bo_device for field bdev. 
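+ * Like drm_gem_vram_of_bo(), this is typically called from TTM code that
+ * only has the embedded &struct ttm_bo_device at hand, e.g. (sketch):
+ *
+ *	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
+ *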
+ * @bdev: the TTM BO device + * + * Returns: + * The containing instance of &struct drm_vram_mm + */ +static inline struct drm_vram_mm *drm_vram_mm_of_bdev( + struct ttm_bo_device *bdev) +{ + return container_of(bdev, struct drm_vram_mm, bdev); +} + +void drm_vram_mm_debugfs_init(struct drm_minor *minor); + +/* + * Helpers for integration with struct drm_device + */ + +struct drm_vram_mm *drm_vram_helper_alloc_mm( + struct drm_device *dev, uint64_t vram_base, size_t vram_size); +void drm_vram_helper_release_mm(struct drm_device *dev); + +int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base, + size_t vram_size); + +/* + * Mode-config helpers + */ + +enum drm_mode_status +drm_vram_helper_mode_valid(struct drm_device *dev, + const struct drm_display_mode *mode); + +#endif diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h new file mode 100644 index 000000000..bb95ff011 --- /dev/null +++ b/include/drm/drm_hashtab.h @@ -0,0 +1,79 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Simple open hash tab implementation. 
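+ *
+ * Typical use of the interface declared below is create/insert/lookup/remove
+ * keyed on an unsigned long. A rough sketch (error handling omitted; the
+ * my_item structure embedding a struct drm_hash_item is hypothetical):
+ *
+ *	drm_ht_create(&ht, 8);
+ *	item->hash.key = key;
+ *	drm_ht_insert_item(&ht, &item->hash);
+ *	if (!drm_ht_find_item(&ht, key, &entry))
+ *		item = drm_hash_entry(entry, struct my_item, hash);
+ *	drm_ht_remove(&ht);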
+ * + * Authors: + * Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +#ifndef DRM_HASHTAB_H +#define DRM_HASHTAB_H + +#include <linux/list.h> + +#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) + +struct drm_hash_item { + struct hlist_node head; + unsigned long key; +}; + +struct drm_open_hash { + struct hlist_head *table; + u8 order; +}; + +int drm_ht_create(struct drm_open_hash *ht, unsigned int order); +int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); +int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add); +int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); + +void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); +int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); +int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); +void drm_ht_remove(struct drm_open_hash *ht); + +/* + * RCU-safe interface + * + * The user of this API needs to make sure that two or more instances of the + * hash table manipulation functions are never run simultaneously. + * The lookup function drm_ht_find_item_rcu may, however, run simultaneously + * with any of the manipulation functions as long as it's called from within + * an RCU read-locked section. + */ +#define drm_ht_insert_item_rcu drm_ht_insert_item +#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please +#define drm_ht_remove_key_rcu drm_ht_remove_key +#define drm_ht_remove_item_rcu drm_ht_remove_item +#define drm_ht_find_item_rcu drm_ht_find_item + +#endif diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h new file mode 100644 index 000000000..fe58dbb46 --- /dev/null +++ b/include/drm/drm_hdcp.h @@ -0,0 +1,305 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2017 Google, Inc. 
+ * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ + +#ifndef _DRM_HDCP_H_INCLUDED_ +#define _DRM_HDCP_H_INCLUDED_ + +#include <linux/types.h> + +/* Period of hdcp checks (to ensure we're still authenticated) */ +#define DRM_HDCP_CHECK_PERIOD_MS (128 * 16) +#define DRM_HDCP2_CHECK_PERIOD_MS 500 + +/* Shared lengths/masks between HDMI/DVI/DisplayPort */ +#define DRM_HDCP_AN_LEN 8 +#define DRM_HDCP_BSTATUS_LEN 2 +#define DRM_HDCP_KSV_LEN 5 +#define DRM_HDCP_RI_LEN 2 +#define DRM_HDCP_V_PRIME_PART_LEN 4 +#define DRM_HDCP_V_PRIME_NUM_PARTS 5 +#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f) +#define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) +#define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) + +/* Slave address for the HDCP registers in the receiver */ +#define DRM_HDCP_DDC_ADDR 0x3A + +/* Value to use at the end of the SHA-1 bytestream used for repeaters */ +#define DRM_HDCP_SHA1_TERMINATOR 0x80 + +/* HDCP register offsets for HDMI/DVI devices */ +#define DRM_HDCP_DDC_BKSV 0x00 +#define DRM_HDCP_DDC_RI_PRIME 0x08 +#define DRM_HDCP_DDC_AKSV 0x10 +#define DRM_HDCP_DDC_AN 0x18 +#define DRM_HDCP_DDC_V_PRIME(h) (0x20 + h * 4) +#define DRM_HDCP_DDC_BCAPS 0x40 +#define DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT BIT(6) +#define DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY BIT(5) +#define DRM_HDCP_DDC_BSTATUS 0x41 +#define DRM_HDCP_DDC_KSV_FIFO 0x43 + +#define DRM_HDCP_1_4_SRM_ID 0x8 +#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3 +#define DRM_HDCP_1_4_DCP_SIG_SIZE 40 + +/* Protocol message definition for HDCP2.2 specification */ +/* + * Protected content streams are classified into 2 types: + * - Type0: Can be transmitted with HDCP 1.4+ + * - Type1: Can be transmitted with HDCP 2.2+ + */ +#define HDCP_STREAM_TYPE0 0x00 +#define HDCP_STREAM_TYPE1 0x01 + +/* HDCP2.2 Msg IDs */ +#define HDCP_2_2_NULL_MSG 1 +#define HDCP_2_2_AKE_INIT 2 +#define HDCP_2_2_AKE_SEND_CERT 3 +#define HDCP_2_2_AKE_NO_STORED_KM 4 +#define HDCP_2_2_AKE_STORED_KM 5 +#define HDCP_2_2_AKE_SEND_HPRIME 7 +#define HDCP_2_2_AKE_SEND_PAIRING_INFO 8 +#define HDCP_2_2_LC_INIT 9 +#define HDCP_2_2_LC_SEND_LPRIME 10 +#define HDCP_2_2_SKE_SEND_EKS 11 +#define HDCP_2_2_REP_SEND_RECVID_LIST 12 +#define HDCP_2_2_REP_SEND_ACK 15 +#define HDCP_2_2_REP_STREAM_MANAGE 16 +#define HDCP_2_2_REP_STREAM_READY 17 + +#define HDCP_2_2_RTX_LEN 8 +#define HDCP_2_2_RRX_LEN 8 + +#define HDCP_2_2_K_PUB_RX_MOD_N_LEN 128 +#define HDCP_2_2_K_PUB_RX_EXP_E_LEN 3 +#define HDCP_2_2_K_PUB_RX_LEN (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \ + HDCP_2_2_K_PUB_RX_EXP_E_LEN) + +#define HDCP_2_2_DCP_LLC_SIG_LEN 384 + +#define HDCP_2_2_E_KPUB_KM_LEN 128 +#define HDCP_2_2_E_KH_KM_M_LEN (16 + 16) +#define HDCP_2_2_H_PRIME_LEN 32 +#define HDCP_2_2_E_KH_KM_LEN 16 +#define HDCP_2_2_RN_LEN 8 +#define HDCP_2_2_L_PRIME_LEN 32 +#define HDCP_2_2_E_DKEY_KS_LEN 16 +#define HDCP_2_2_RIV_LEN 8 +#define HDCP_2_2_SEQ_NUM_LEN 3 +#define HDCP_2_2_V_PRIME_HALF_LEN (HDCP_2_2_L_PRIME_LEN / 2) +#define HDCP_2_2_RECEIVER_ID_LEN DRM_HDCP_KSV_LEN +#define HDCP_2_2_MAX_DEVICE_COUNT 31 +#define HDCP_2_2_RECEIVER_IDS_MAX_LEN (HDCP_2_2_RECEIVER_ID_LEN * \ + HDCP_2_2_MAX_DEVICE_COUNT) +#define HDCP_2_2_MPRIME_LEN 32 + +/* Following Macros take a byte at a time for bit(s) masking */ +/* + * TODO: This has to be changed for DP MST, as multiple stream on + * same port is possible. + * For HDCP2.2 on HDMI and DP SST this value is always 1. 
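+ *
+ * As an illustration (a sketch, not lifted from any particular driver), a
+ * transmitter can recover the topology fields from the two big-endian RxInfo
+ * bytes of REP_SEND_RECVID_LIST using the byte-masking helpers below:
+ *
+ *	u32 dev_count = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4) |
+ *			HDCP_2_2_DEV_COUNT_LO(rx_info[1]);
+ *	u32 depth = HDCP_2_2_DEPTH(rx_info[0]);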
+ */ +#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1 +#define HDCP_2_2_TXCAP_MASK_LEN 2 +#define HDCP_2_2_RXCAPS_LEN 3 +#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0)) +#define HDCP_2_2_DP_HDCP_CAPABLE(x) ((x) & BIT(1)) +#define HDCP_2_2_RXINFO_LEN 2 + +/* HDCP1.x compliant device in downstream */ +#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x) ((x) & BIT(0)) + +/* HDCP2.0 Compliant repeater in downstream */ +#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x) ((x) & BIT(1)) +#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x) ((x) & BIT(2)) +#define HDCP_2_2_MAX_DEVS_EXCEEDED(x) ((x) & BIT(3)) +#define HDCP_2_2_DEV_COUNT_LO(x) (((x) & (0xF << 4)) >> 4) +#define HDCP_2_2_DEV_COUNT_HI(x) ((x) & BIT(0)) +#define HDCP_2_2_DEPTH(x) (((x) & (0x7 << 1)) >> 1) + +struct hdcp2_cert_rx { + u8 receiver_id[HDCP_2_2_RECEIVER_ID_LEN]; + u8 kpub_rx[HDCP_2_2_K_PUB_RX_LEN]; + u8 reserved[2]; + u8 dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN]; +} __packed; + +struct hdcp2_streamid_type { + u8 stream_id; + u8 stream_type; +} __packed; + +/* + * The TxCaps field specified in the HDCP HDMI, DP specs + * This field is big endian as specified in the errata. + */ +struct hdcp2_tx_caps { + /* Transmitter must set this to 0x2 */ + u8 version; + + /* Reserved for HDCP and DP Spec. Read as Zero */ + u8 tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN]; +} __packed; + +/* Main structures for HDCP2.2 protocol communication */ +struct hdcp2_ake_init { + u8 msg_id; + u8 r_tx[HDCP_2_2_RTX_LEN]; + struct hdcp2_tx_caps tx_caps; +} __packed; + +struct hdcp2_ake_send_cert { + u8 msg_id; + struct hdcp2_cert_rx cert_rx; + u8 r_rx[HDCP_2_2_RRX_LEN]; + u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; +} __packed; + +struct hdcp2_ake_no_stored_km { + u8 msg_id; + u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; +} __packed; + +struct hdcp2_ake_stored_km { + u8 msg_id; + u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; +} __packed; + +struct hdcp2_ake_send_hprime { + u8 msg_id; + u8 h_prime[HDCP_2_2_H_PRIME_LEN]; +} __packed; + +struct hdcp2_ake_send_pairing_info { + u8 msg_id; + u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; +} __packed; + +struct hdcp2_lc_init { + u8 msg_id; + u8 r_n[HDCP_2_2_RN_LEN]; +} __packed; + +struct hdcp2_lc_send_lprime { + u8 msg_id; + u8 l_prime[HDCP_2_2_L_PRIME_LEN]; +} __packed; + +struct hdcp2_ske_send_eks { + u8 msg_id; + u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; + u8 riv[HDCP_2_2_RIV_LEN]; +} __packed; + +struct hdcp2_rep_send_receiverid_list { + u8 msg_id; + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; + u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; + u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; +} __packed; + +struct hdcp2_rep_send_ack { + u8 msg_id; + u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; +} __packed; + +struct hdcp2_rep_stream_manage { + u8 msg_id; + u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; + __be16 k; + struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT]; +} __packed; + +struct hdcp2_rep_stream_ready { + u8 msg_id; + u8 m_prime[HDCP_2_2_MPRIME_LEN]; +} __packed; + +/* HDCP2.2 TIMEOUTs in mSec */ +#define HDCP_2_2_CERT_TIMEOUT_MS 100 +#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000 +#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200 +#define HDCP_2_2_PAIRING_TIMEOUT_MS 200 +#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20 +#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7 +#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000 +#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100 + +/* HDMI HDCP2.2 Register Offsets */ +#define HDCP_2_2_HDMI_REG_VER_OFFSET 0x50 +#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET 0x60 +#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET 0x70 +#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET 0x80 
+#define HDCP_2_2_HDMI_REG_DBG_OFFSET 0xC0 + +#define HDCP_2_2_HDMI_SUPPORT_MASK BIT(2) +#define HDCP_2_2_RX_CAPS_VERSION_VAL 0x02 +#define HDCP_2_2_SEQ_NUM_MAX 0xFFFFFF +#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN 200 + +/* Below macros take a byte at a time and mask the bit(s) */ +#define HDCP_2_2_HDMI_RXSTATUS_LEN 2 +#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3) +#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) +#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) + +/* + * Helper functions to convert 24bit big endian hdcp sequence number to + * host format and back + */ +static inline +u32 drm_hdcp_be24_to_cpu(const u8 seq_num[HDCP_2_2_SEQ_NUM_LEN]) +{ + return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16); +} + +static inline +void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val) +{ + seq_num[0] = val >> 16; + seq_num[1] = val >> 8; + seq_num[2] = val; +} + +#define DRM_HDCP_SRM_GEN1_MAX_BYTES (5 * 1024) +#define DRM_HDCP_1_4_SRM_ID 0x8 +#define DRM_HDCP_SRM_ID_MASK (0xF << 4) +#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3 +#define DRM_HDCP_1_4_DCP_SIG_SIZE 40 +#define DRM_HDCP_2_SRM_ID 0x9 +#define DRM_HDCP_2_INDICATOR 0x1 +#define DRM_HDCP_2_INDICATOR_MASK 0xF +#define DRM_HDCP_2_VRL_LENGTH_SIZE 3 +#define DRM_HDCP_2_DCP_SIG_SIZE 384 +#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4 +#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6) + +struct hdcp_srm_header { + u8 srm_id; + u8 reserved; + __be16 srm_version; + u8 srm_gen_no; +} __packed; + +struct drm_device; +struct drm_connector; + +int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, + u8 *ksvs, u32 ksv_count); +int drm_connector_attach_content_protection_property( + struct drm_connector *connector, bool hdcp_content_type); +void drm_hdcp_update_content_protection(struct drm_connector *connector, + u64 val); + +/* Content Type classification for HDCP2.2 vs others */ +#define DRM_MODE_HDCP_CONTENT_TYPE0 0 +#define DRM_MODE_HDCP_CONTENT_TYPE1 1 + +#endif diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h new file mode 100644 index 000000000..afb27cb6a --- /dev/null +++ b/include/drm/drm_ioctl.h @@ -0,0 +1,186 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_IOCTL_H_ +#define _DRM_IOCTL_H_ + +#include <linux/types.h> +#include <linux/bitops.h> + +#include <asm/ioctl.h> + +struct drm_device; +struct drm_file; +struct file; + +/** + * drm_ioctl_t - DRM ioctl function type. + * @dev: DRM device inode + * @data: private pointer of the ioctl call + * @file_priv: DRM file this ioctl was made on + * + * This is the DRM ioctl typedef. Note that drm_ioctl() has alrady copied @data + * into kernel-space, and will also copy it back, depending upon the read/write + * settings in the ioctl command code. + */ +typedef int drm_ioctl_t(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/** + * drm_ioctl_compat_t - compatibility DRM ioctl function type. + * @filp: file pointer + * @cmd: ioctl command code + * @arg: DRM file this ioctl was made on + * + * Just a typedef to make declaring an array of compatibility handlers easier. + * New drivers shouldn't screw up the structure layout for their ioctl + * structures and hence never need this. + */ +typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, + unsigned long arg); + +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n) +#define DRM_MAJOR 226 + +/** + * enum drm_ioctl_flags - DRM ioctl flags + * + * Various flags that can be set in &drm_ioctl_desc.flags to control how + * userspace can use a given ioctl. + */ +enum drm_ioctl_flags { + /** + * @DRM_AUTH: + * + * This is for ioctl which are used for rendering, and require that the + * file descriptor is either for a render node, or if it's a + * legacy/primary node, then it must be authenticated. + */ + DRM_AUTH = BIT(0), + /** + * @DRM_MASTER: + * + * This must be set for any ioctl which can change the modeset or + * display state. Userspace must call the ioctl through a primary node, + * while it is the active master. + * + * Note that read-only modeset ioctl can also be called by + * unauthenticated clients, or when a master is not the currently active + * one. + */ + DRM_MASTER = BIT(1), + /** + * @DRM_ROOT_ONLY: + * + * Anything that could potentially wreak a master file descriptor needs + * to have this flag set. Current that's only for the SETMASTER and + * DROPMASTER ioctl, which e.g. logind can call to force a non-behaving + * master (display compositor) into compliance. + * + * This is equivalent to callers with the SYSADMIN capability. + */ + DRM_ROOT_ONLY = BIT(2), + /** + * @DRM_UNLOCKED: + * + * Whether &drm_ioctl_desc.func should be called with the DRM BKL held + * or not. Enforced as the default for all modern drivers, hence there + * should never be a need to set this flag. + * + * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the + * only legacy IOCTL which needs this. + */ + DRM_UNLOCKED = BIT(4), + /** + * @DRM_RENDER_ALLOW: + * + * This is used for all ioctl needed for rendering only, for drivers + * which support render nodes. This should be all new render drivers, + * and hence it should be always set for any ioctl with DRM_AUTH set. + * Note though that read-only query ioctl might have this set, but have + * not set DRM_AUTH because they do not require authentication. 
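+ *
+ * For instance, a render-only submission ioctl is normally declared in the
+ * driver's ioctl table with this flag. A sketch using the
+ * DRM_IOCTL_DEF_DRV() helper documented below (MY_SUBMIT and
+ * my_submit_ioctl are hypothetical names):
+ *
+ *	static const struct drm_ioctl_desc my_ioctls[] = {
+ *		DRM_IOCTL_DEF_DRV(MY_SUBMIT, my_submit_ioctl,
+ *				  DRM_RENDER_ALLOW),
+ *	};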
+ */ + DRM_RENDER_ALLOW = BIT(5), +}; + +/** + * struct drm_ioctl_desc - DRM driver ioctl entry + * @cmd: ioctl command number, without flags + * @flags: a bitmask of &enum drm_ioctl_flags + * @func: handler for this ioctl + * @name: user-readable name for debug output + * + * For convenience it's easier to create these using the DRM_IOCTL_DEF_DRV() + * macro. + */ +struct drm_ioctl_desc { + unsigned int cmd; + enum drm_ioctl_flags flags; + drm_ioctl_t *func; + const char *name; +}; + +/** + * DRM_IOCTL_DEF_DRV() - helper macro to fill out a &struct drm_ioctl_desc + * @ioctl: ioctl command suffix + * @_func: handler for the ioctl + * @_flags: a bitmask of &enum drm_ioctl_flags + * + * Small helper macro to create a &struct drm_ioctl_desc entry. The ioctl + * command number is constructed by prepending ``DRM_IOCTL\_`` and passing that + * to DRM_IOCTL_NR(). + */ +#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \ + .cmd = DRM_IOCTL_##ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +int drm_ioctl_permit(u32 flags, struct drm_file *file_priv); +long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32); +#ifdef CONFIG_COMPAT +long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#else +/* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */ +#define drm_compat_ioctl NULL +#endif +bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); + +int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_invalid_op(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#endif /* _DRM_IOCTL_H_ */ diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h new file mode 100644 index 000000000..d77f6e65b --- /dev/null +++ b/include/drm/drm_irq.h @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DRM_IRQ_H_ +#define _DRM_IRQ_H_ + +struct drm_device; + +int drm_irq_install(struct drm_device *dev, int irq); +int drm_irq_uninstall(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h new file mode 100644 index 000000000..5c9ef6a2a --- /dev/null +++ b/include/drm/drm_lease.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright © 2017 Keith Packard <keithp@keithp.com> + */ + +#ifndef _DRM_LEASE_H_ +#define _DRM_LEASE_H_ + +struct drm_file; +struct drm_device; +struct drm_master; + +struct drm_master *drm_lease_owner(struct drm_master *master); + +void drm_lease_destroy(struct drm_master *lessee); + +bool drm_lease_held(struct drm_file *file_priv, int id); + +bool _drm_lease_held(struct drm_file *file_priv, int id); + +void drm_lease_revoke(struct drm_master *master); + +uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs); + +int drm_mode_create_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_list_lessees_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_get_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_revoke_lease_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +#endif /* _DRM_LEASE_H_ */ diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h new file mode 100644 index 000000000..852d7451e --- /dev/null +++ b/include/drm/drm_legacy.h @@ -0,0 +1,235 @@ +#ifndef __DRM_DRM_LEGACY_H__ +#define __DRM_DRM_LEGACY_H__ +/* + * Legacy driver interfaces for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * Copyright © 2014 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <drm/drm.h> +#include <drm/drm_auth.h> +#include <drm/drm_hashtab.h> + +struct drm_device; +struct drm_driver; +struct file; +struct pci_driver; + +/* + * Legacy Support for palateontologic DRM drivers + * + * If you add a new driver and it uses any of these functions or structures, + * you're doing it terribly wrong. 
+ */ + +/** + * DMA buffer. + */ +struct drm_buf { + int idx; /**< Index into master buflist */ + int total; /**< Buffer size */ + int order; /**< log-base-2(total) */ + int used; /**< Amount of buffer in use (for DMA) */ + unsigned long offset; /**< Byte offset (used internally) */ + void *address; /**< Address of buffer */ + unsigned long bus_address; /**< Bus address of buffer */ + struct drm_buf *next; /**< Kernel-only: used for free list */ + __volatile__ int waiting; /**< On kernel DMA queue */ + __volatile__ int pending; /**< On hardware DMA queue */ + struct drm_file *file_priv; /**< Private of holding file descr */ + int context; /**< Kernel queue for this buffer */ + int while_locked; /**< Dispatch this buffer while locked */ + enum { + DRM_LIST_NONE = 0, + DRM_LIST_FREE = 1, + DRM_LIST_WAIT = 2, + DRM_LIST_PEND = 3, + DRM_LIST_PRIO = 4, + DRM_LIST_RECLAIM = 5 + } list; /**< Which list we're on */ + + int dev_priv_size; /**< Size of buffer private storage */ + void *dev_private; /**< Per-buffer private storage */ +}; + +typedef struct drm_dma_handle { + dma_addr_t busaddr; + void *vaddr; + size_t size; +} drm_dma_handle_t; + +/** + * Buffer entry. There is one of this for each buffer size order. + */ +struct drm_buf_entry { + int buf_size; /**< size */ + int buf_count; /**< number of buffers */ + struct drm_buf *buflist; /**< buffer list */ + int seg_count; + int page_order; + struct drm_dma_handle **seglist; + + int low_mark; /**< Low water mark */ + int high_mark; /**< High water mark */ +}; + +/** + * DMA data. + */ +struct drm_device_dma { + + struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ + int buf_count; /**< total number of buffers */ + struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ + int seg_count; + int page_count; /**< number of pages */ + unsigned long *pagelist; /**< page list */ + unsigned long byte_count; + enum { + _DRM_DMA_USE_AGP = 0x01, + _DRM_DMA_USE_SG = 0x02, + _DRM_DMA_USE_FB = 0x04, + _DRM_DMA_USE_PCI_RO = 0x08 + } flags; + +}; + +/** + * Scatter-gather memory. 
+ */ +struct drm_sg_mem { + unsigned long handle; + void *virtual; + int pages; + struct page **pagelist; + dma_addr_t *busaddr; +}; + +/** + * Kernel side of a mapping + */ +struct drm_local_map { + dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/ + unsigned long size; /**< Requested physical size (bytes) */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ + void *handle; /**< User-space: "Handle" to pass to mmap() */ + /**< Kernel-space: kernel-virtual address */ + int mtrr; /**< MTRR slot used */ +}; + +typedef struct drm_local_map drm_local_map_t; + +/** + * Mappings list + */ +struct drm_map_list { + struct list_head head; /**< list head */ + struct drm_hash_item hash; + struct drm_local_map *map; /**< mapping */ + uint64_t user_token; + struct drm_master *master; +}; + +int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, struct drm_local_map **map_p); +struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token); +void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); +int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); +struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); +int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); + +int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); +int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); + +/** + * Test that the hardware lock is held by the caller, returning otherwise. + * + * \param dev DRM device. + * \param filp file pointer of the caller. + */ +#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ +do { \ + if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ + _file_priv->master->lock.file_priv != _file_priv) { \ + DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ + __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ + _file_priv->master->lock.file_priv, _file_priv); \ + return -EINVAL; \ + } \ +} while (0) + +void drm_legacy_idlelock_take(struct drm_lock_data *lock); +void drm_legacy_idlelock_release(struct drm_lock_data *lock); + +/* drm_pci.c */ + +#ifdef CONFIG_PCI + +struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, + size_t align); +void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah); + +int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); +void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); + +#else + +static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, + size_t size, size_t align) +{ + return NULL; +} + +static inline void drm_pci_free(struct drm_device *dev, + struct drm_dma_handle *dmah) +{ +} + +static inline int drm_legacy_pci_init(struct drm_driver *driver, + struct pci_driver *pdriver) +{ + return -EINVAL; +} + +static inline void drm_legacy_pci_exit(struct drm_driver *driver, + struct pci_driver *pdriver) +{ +} + +#endif + +/* drm_memory.c */ +void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); +void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); +void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); + +#endif /* __DRM_DRM_LEGACY_H__ */ diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h new file mode 100644 index 000000000..ca4114633 --- /dev/null +++ 
b/include/drm/drm_managed.h @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef _DRM_MANAGED_H_ +#define _DRM_MANAGED_H_ + +#include <linux/gfp.h> +#include <linux/overflow.h> +#include <linux/types.h> + +struct drm_device; + +typedef void (*drmres_release_t)(struct drm_device *dev, void *res); + +/** + * drmm_add_action - add a managed release action to a &drm_device + * @dev: DRM device + * @action: function which should be called when @dev is released + * @data: opaque pointer, passed to @action + * + * This function adds the @release action with optional parameter @data to the + * list of cleanup actions for @dev. The cleanup actions will be run in reverse + * order in the final drm_dev_put() call for @dev. + */ +#define drmm_add_action(dev, action, data) \ + __drmm_add_action(dev, action, data, #action) + +int __must_check __drmm_add_action(struct drm_device *dev, + drmres_release_t action, + void *data, const char *name); + +/** + * drmm_add_action_or_reset - add a managed release action to a &drm_device + * @dev: DRM device + * @action: function which should be called when @dev is released + * @data: opaque pointer, passed to @action + * + * Similar to drmm_add_action(), with the only difference that upon failure + * @action is directly called for any cleanup work necessary on failures. + */ +#define drmm_add_action_or_reset(dev, action, data) \ + __drmm_add_action_or_reset(dev, action, data, #action) + +int __must_check __drmm_add_action_or_reset(struct drm_device *dev, + drmres_release_t action, + void *data, const char *name); + +void drmm_add_final_kfree(struct drm_device *dev, void *container); + +void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc; + +/** + * drmm_kzalloc - &drm_device managed kzalloc() + * @dev: DRM device + * @size: size of the memory allocation + * @gfp: GFP allocation flags + * + * This is a &drm_device managed version of kzalloc(). The allocated memory is + * automatically freed on the final drm_dev_put(). Memory can also be freed + * before the final drm_dev_put() by calling drmm_kfree(). + */ +static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp) +{ + return drmm_kmalloc(dev, size, gfp | __GFP_ZERO); +} + +/** + * drmm_kmalloc_array - &drm_device managed kmalloc_array() + * @dev: DRM device + * @n: number of array elements to allocate + * @size: size of array member + * @flags: GFP allocation flags + * + * This is a &drm_device managed version of kmalloc_array(). The allocated + * memory is automatically freed on the final drm_dev_put() and works exactly + * like a memory allocation obtained by drmm_kmalloc(). + */ +static inline void *drmm_kmalloc_array(struct drm_device *dev, + size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + + return drmm_kmalloc(dev, bytes, flags); +} + +/** + * drmm_kcalloc - &drm_device managed kcalloc() + * @dev: DRM device + * @n: number of array elements to allocate + * @size: size of array member + * @flags: GFP allocation flags + * + * This is a &drm_device managed version of kcalloc(). The allocated memory is + * automatically freed on the final drm_dev_put() and works exactly like a + * memory allocation obtained by drmm_kmalloc(). 
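+ *
+ * For example (a sketch; states and num_planes are illustrative), a driver
+ * can allocate a zeroed, device-managed array without needing a matching
+ * kfree() in its teardown path:
+ *
+ *	states = drmm_kcalloc(dev, num_planes, sizeof(*states), GFP_KERNEL);
+ *	if (!states)
+ *		return -ENOMEM;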
+ */ +static inline void *drmm_kcalloc(struct drm_device *dev, + size_t n, size_t size, gfp_t flags) +{ + return drmm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); +} + +char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp); + +void drmm_kfree(struct drm_device *dev, void *data); + +#endif diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h new file mode 100644 index 000000000..c2827ceab --- /dev/null +++ b/include/drm/drm_mipi_dbi.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * MIPI Display Bus Interface (DBI) LCD controller support + * + * Copyright 2016 Noralf Trønnes + */ + +#ifndef __LINUX_MIPI_DBI_H +#define __LINUX_MIPI_DBI_H + +#include <linux/mutex.h> +#include <drm/drm_device.h> +#include <drm/drm_simple_kms_helper.h> + +struct drm_rect; +struct spi_device; +struct gpio_desc; +struct regulator; + +/** + * struct mipi_dbi - MIPI DBI interface + */ +struct mipi_dbi { + /** + * @cmdlock: Command lock + */ + struct mutex cmdlock; + + /** + * @command: Bus specific callback executing commands. + */ + int (*command)(struct mipi_dbi *dbi, u8 *cmd, u8 *param, size_t num); + + /** + * @read_commands: Array of read commands terminated by a zero entry. + * Reading is disabled if this is NULL. + */ + const u8 *read_commands; + + /** + * @swap_bytes: Swap bytes in buffer before transfer + */ + bool swap_bytes; + + /** + * @reset: Optional reset gpio + */ + struct gpio_desc *reset; + + /* Type C specific */ + + /** + * @spi: SPI device + */ + struct spi_device *spi; + + /** + * @dc: Optional D/C gpio. + */ + struct gpio_desc *dc; + + /** + * @tx_buf9: Buffer used for Option 1 9-bit conversion + */ + void *tx_buf9; + + /** + * @tx_buf9_len: Size of tx_buf9. + */ + size_t tx_buf9_len; +}; + +/** + * struct mipi_dbi_dev - MIPI DBI device + */ +struct mipi_dbi_dev { + /** + * @drm: DRM device + */ + struct drm_device drm; + + /** + * @pipe: Display pipe structure + */ + struct drm_simple_display_pipe pipe; + + /** + * @connector: Connector + */ + struct drm_connector connector; + + /** + * @mode: Fixed display mode + */ + struct drm_display_mode mode; + + /** + * @tx_buf: Buffer used for transfer (copy clip rect area) + */ + u16 *tx_buf; + + /** + * @rotation: initial rotation in degrees Counter Clock Wise + */ + unsigned int rotation; + + /** + * @left_offset: Horizontal offset of the display relative to the + * controller's driver array + */ + unsigned int left_offset; + + /** + * @top_offset: Vertical offset of the display relative to the + * controller's driver array + */ + unsigned int top_offset; + + /** + * @backlight: backlight device (optional) + */ + struct backlight_device *backlight; + + /** + * @regulator: power regulator (optional) + */ + struct regulator *regulator; + + /** + * @dbi: MIPI DBI interface + */ + struct mipi_dbi dbi; +}; + +static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm) +{ + return container_of(drm, struct mipi_dbi_dev, drm); +} + +int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi, + struct gpio_desc *dc); +int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const struct drm_display_mode *mode, + unsigned int rotation, size_t tx_buf_size); +int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev, + const struct drm_simple_display_pipe_funcs *funcs, + const struct drm_display_mode *mode, unsigned int rotation); +void mipi_dbi_pipe_update(struct 
drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state); +void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plan_state); +void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); +void mipi_dbi_hw_reset(struct mipi_dbi *dbi); +bool mipi_dbi_display_is_on(struct mipi_dbi *dbi); +int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev); +int mipi_dbi_poweron_conditional_reset(struct mipi_dbi_dev *dbidev); + +u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); +int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz, + u8 bpw, const void *buf, size_t len); + +int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val); +int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); +int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, + size_t len); +int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_rect *clip, bool swap); +/** + * mipi_dbi_command - MIPI DCS command with optional parameter(s) + * @dbi: MIPI DBI structure + * @cmd: Command + * @seq...: Optional parameter(s) + * + * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for + * get/read. + * + * Returns: + * Zero on success, negative error code on failure. + */ +#define mipi_dbi_command(dbi, cmd, seq...) \ +({ \ + const u8 d[] = { seq }; \ + mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \ +}) + +#ifdef CONFIG_DEBUG_FS +void mipi_dbi_debugfs_init(struct drm_minor *minor); +#else +#define mipi_dbi_debugfs_init NULL +#endif + +#endif /* __LINUX_MIPI_DBI_H */ diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h new file mode 100644 index 000000000..31ba85a41 --- /dev/null +++ b/include/drm/drm_mipi_dsi.h @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * MIPI DSI Bus + * + * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd. 
+ * Andrzej Hajda <a.hajda@samsung.com> + */ + +#ifndef __DRM_MIPI_DSI_H__ +#define __DRM_MIPI_DSI_H__ + +#include <linux/device.h> + +struct mipi_dsi_host; +struct mipi_dsi_device; +struct drm_dsc_picture_parameter_set; + +/* request ACK from peripheral */ +#define MIPI_DSI_MSG_REQ_ACK BIT(0) +/* use Low Power Mode to transmit message */ +#define MIPI_DSI_MSG_USE_LPM BIT(1) + +/** + * struct mipi_dsi_msg - read/write DSI buffer + * @channel: virtual channel id + * @type: payload data type + * @flags: flags controlling this message transmission + * @tx_len: length of @tx_buf + * @tx_buf: data to be written + * @rx_len: length of @rx_buf + * @rx_buf: data to be read, or NULL + */ +struct mipi_dsi_msg { + u8 channel; + u8 type; + u16 flags; + + size_t tx_len; + const void *tx_buf; + + size_t rx_len; + void *rx_buf; +}; + +bool mipi_dsi_packet_format_is_short(u8 type); +bool mipi_dsi_packet_format_is_long(u8 type); + +/** + * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format + * @size: size (in bytes) of the packet + * @header: the four bytes that make up the header (Data ID, Word Count or + * Packet Data, and ECC) + * @payload_length: number of bytes in the payload + * @payload: a pointer to a buffer containing the payload, if any + */ +struct mipi_dsi_packet { + size_t size; + u8 header[4]; + size_t payload_length; + const u8 *payload; +}; + +int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, + const struct mipi_dsi_msg *msg); + +/** + * struct mipi_dsi_host_ops - DSI bus operations + * @attach: attach DSI device to DSI host + * @detach: detach DSI device from DSI host + * @transfer: transmit a DSI packet + * + * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg + * structures. This structure contains information about the type of packet + * being transmitted as well as the transmit and receive buffers. When an + * error is encountered during transmission, this function will return a + * negative error code. On success it shall return the number of bytes + * transmitted for write packets or the number of bytes received for read + * packets. + * + * Note that typically DSI packet transmission is atomic, so the .transfer() + * function will seldomly return anything other than the number of bytes + * contained in the transmit buffer on success. 
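+ *
+ * A skeleton .transfer() implementation (a sketch only; my_dsi_transfer and
+ * the FIFO write are placeholders) would usually start by packing the
+ * message with mipi_dsi_create_packet():
+ *
+ *	static ssize_t my_dsi_transfer(struct mipi_dsi_host *host,
+ *				       const struct mipi_dsi_msg *msg)
+ *	{
+ *		struct mipi_dsi_packet packet;
+ *		int ret;
+ *
+ *		ret = mipi_dsi_create_packet(&packet, msg);
+ *		if (ret)
+ *			return ret;
+ *
+ *		(write packet.header and packet.payload to the hardware FIFO,
+ *		 then wait for completion)
+ *
+ *		return msg->tx_len;
+ *	}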
+ */ +struct mipi_dsi_host_ops { + int (*attach)(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi); + int (*detach)(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi); + ssize_t (*transfer)(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +}; + +/** + * struct mipi_dsi_host - DSI host device + * @dev: driver model device node for this DSI host + * @ops: DSI host operations + * @list: list management + */ +struct mipi_dsi_host { + struct device *dev; + const struct mipi_dsi_host_ops *ops; + struct list_head list; +}; + +int mipi_dsi_host_register(struct mipi_dsi_host *host); +void mipi_dsi_host_unregister(struct mipi_dsi_host *host); +struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); + +/* DSI mode flags */ + +/* video mode */ +#define MIPI_DSI_MODE_VIDEO BIT(0) +/* video burst mode */ +#define MIPI_DSI_MODE_VIDEO_BURST BIT(1) +/* video pulse mode */ +#define MIPI_DSI_MODE_VIDEO_SYNC_PULSE BIT(2) +/* enable auto vertical count mode */ +#define MIPI_DSI_MODE_VIDEO_AUTO_VERT BIT(3) +/* enable hsync-end packets in vsync-pulse and v-porch area */ +#define MIPI_DSI_MODE_VIDEO_HSE BIT(4) +/* disable hfront-porch area */ +#define MIPI_DSI_MODE_VIDEO_HFP BIT(5) +/* disable hback-porch area */ +#define MIPI_DSI_MODE_VIDEO_HBP BIT(6) +/* disable hsync-active area */ +#define MIPI_DSI_MODE_VIDEO_HSA BIT(7) +/* flush display FIFO on vsync pulse */ +#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) +/* disable EoT packets in HS mode */ +#define MIPI_DSI_MODE_EOT_PACKET BIT(9) +/* device supports non-continuous clock behavior (DSI spec 5.6.1) */ +#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) +/* transmit data in low power */ +#define MIPI_DSI_MODE_LPM BIT(11) + +enum mipi_dsi_pixel_format { + MIPI_DSI_FMT_RGB888, + MIPI_DSI_FMT_RGB666, + MIPI_DSI_FMT_RGB666_PACKED, + MIPI_DSI_FMT_RGB565, +}; + +#define DSI_DEV_NAME_SIZE 20 + +/** + * struct mipi_dsi_device_info - template for creating a mipi_dsi_device + * @type: DSI peripheral chip type + * @channel: DSI virtual channel assigned to peripheral + * @node: pointer to OF device node or NULL + * + * This is populated and passed to mipi_dsi_device_new to create a new + * DSI device + */ +struct mipi_dsi_device_info { + char type[DSI_DEV_NAME_SIZE]; + u32 channel; + struct device_node *node; +}; + +/** + * struct mipi_dsi_device - DSI peripheral device + * @host: DSI host for this peripheral + * @dev: driver model device node for this peripheral + * @name: DSI peripheral chip type + * @channel: virtual channel assigned to the peripheral + * @format: pixel format for video mode + * @lanes: number of active data lanes + * @mode_flags: DSI operation mode related flags + * @hs_rate: maximum lane frequency for high speed mode in hertz, this should + * be set to the real limits of the hardware, zero is only accepted for + * legacy drivers + * @lp_rate: maximum lane frequency for low power mode in hertz, this should + * be set to the real limits of the hardware, zero is only accepted for + * legacy drivers + */ +struct mipi_dsi_device { + struct mipi_dsi_host *host; + struct device dev; + + char name[DSI_DEV_NAME_SIZE]; + unsigned int channel; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + unsigned long mode_flags; + unsigned long hs_rate; + unsigned long lp_rate; +}; + +#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:" + +static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) +{ + return container_of(dev, struct mipi_dsi_device, dev); +} + +/** + * mipi_dsi_pixel_format_to_bpp - obtain 
the number of bits per pixel for any + * given pixel format defined by the MIPI DSI + * specification + * @fmt: MIPI DSI pixel format + * + * Returns: The number of bits per pixel of the given pixel format. + */ +static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB888: + case MIPI_DSI_FMT_RGB666: + return 24; + + case MIPI_DSI_FMT_RGB666_PACKED: + return 18; + + case MIPI_DSI_FMT_RGB565: + return 16; + } + + return -EINVAL; +} + +struct mipi_dsi_device * +mipi_dsi_device_register_full(struct mipi_dsi_host *host, + const struct mipi_dsi_device_info *info); +void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi); +struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); +int mipi_dsi_attach(struct mipi_dsi_device *dsi); +int mipi_dsi_detach(struct mipi_dsi_device *dsi); +int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, + u16 value); +ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable); +ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, + const struct drm_dsc_picture_parameter_set *pps); + +ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, + size_t size); +ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, + size_t num_params, void *data, size_t size); + +/** + * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode + * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking + * information only + * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both + * V-Blanking and H-Blanking information + */ +enum mipi_dsi_dcs_tear_mode { + MIPI_DSI_DCS_TEAR_MODE_VBLANK, + MIPI_DSI_DCS_TEAR_MODE_VHBLANK, +}; + +#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2) +#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3) +#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4) +#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5) +#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6) + +ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, + size_t len); +int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode); +int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format); +int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, + enum mipi_dsi_dcs_tear_mode mode); +int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format); +int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline); +int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi, + u16 brightness); +int 
mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, + u16 *brightness); +int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi, + u16 brightness); +int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, + u16 *brightness); + +/** + * struct mipi_dsi_driver - DSI driver + * @driver: device driver model driver + * @probe: callback for device binding + * @remove: callback for device unbinding + * @shutdown: called at shutdown time to quiesce the device + */ +struct mipi_dsi_driver { + struct device_driver driver; + int(*probe)(struct mipi_dsi_device *dsi); + int(*remove)(struct mipi_dsi_device *dsi); + void (*shutdown)(struct mipi_dsi_device *dsi); +}; + +static inline struct mipi_dsi_driver * +to_mipi_dsi_driver(struct device_driver *driver) +{ + return container_of(driver, struct mipi_dsi_driver, driver); +} + +static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi) +{ + return dev_get_drvdata(&dsi->dev); +} + +static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data) +{ + dev_set_drvdata(&dsi->dev, data); +} + +int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver, + struct module *owner); +void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver); + +#define mipi_dsi_driver_register(driver) \ + mipi_dsi_driver_register_full(driver, THIS_MODULE) + +#define module_mipi_dsi_driver(__mipi_dsi_driver) \ + module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \ + mipi_dsi_driver_unregister) + +#endif /* __DRM_MIPI_DSI__ */ diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h new file mode 100644 index 000000000..9b4292f22 --- /dev/null +++ b/include/drm/drm_mm.h @@ -0,0 +1,551 @@ +/************************************************************************** + * + * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. + * Copyright 2016 Intel Corporation + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + **************************************************************************/ +/* + * Authors: + * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> + */ + +#ifndef _DRM_MM_H_ +#define _DRM_MM_H_ + +/* + * Generic range manager structs + */ +#include <linux/bug.h> +#include <linux/rbtree.h> +#include <linux/kernel.h> +#include <linux/mm_types.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#ifdef CONFIG_DRM_DEBUG_MM +#include <linux/stackdepot.h> +#endif +#include <drm/drm_print.h> + +#ifdef CONFIG_DRM_DEBUG_MM +#define DRM_MM_BUG_ON(expr) BUG_ON(expr) +#else +#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif + +/** + * enum drm_mm_insert_mode - control search and allocation behaviour + * + * The &struct drm_mm range manager supports finding a suitable modes using + * a number of search trees. These trees are oranised by size, by address and + * in most recent eviction order. This allows the user to find either the + * smallest hole to reuse, the lowest or highest address to reuse, or simply + * reuse the most recent eviction that fits. When allocating the &drm_mm_node + * from within the hole, the &drm_mm_insert_mode also dictate whether to + * allocate the lowest matching address or the highest. + */ +enum drm_mm_insert_mode { + /** + * @DRM_MM_INSERT_BEST: + * + * Search for the smallest hole (within the search range) that fits + * the desired node. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_BEST = 0, + + /** + * @DRM_MM_INSERT_LOW: + * + * Search for the lowest hole (address closest to 0, within the search + * range) that fits the desired node. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_LOW, + + /** + * @DRM_MM_INSERT_HIGH: + * + * Search for the highest hole (address closest to U64_MAX, within the + * search range) that fits the desired node. + * + * Allocates the node from the *top* of the found hole. The specified + * alignment for the node is applied to the base of the node + * (&drm_mm_node.start). + */ + DRM_MM_INSERT_HIGH, + + /** + * @DRM_MM_INSERT_EVICT: + * + * Search for the most recently evicted hole (within the search range) + * that fits the desired node. This is appropriate for use immediately + * after performing an eviction scan (see drm_mm_scan_init()) and + * removing the selected nodes to form a hole. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_EVICT, + + /** + * @DRM_MM_INSERT_ONCE: + * + * Only check the first hole for suitablity and report -ENOSPC + * immediately otherwise, rather than check every hole until a + * suitable one is found. Can only be used in conjunction with another + * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW. + */ + DRM_MM_INSERT_ONCE = BIT(31), + + /** + * @DRM_MM_INSERT_HIGHEST: + * + * Only check the highest hole (the hole with the largest address) and + * insert the node at the top of the hole or report -ENOSPC if + * unsuitable. + * + * Does not search all holes. + */ + DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE, + + /** + * @DRM_MM_INSERT_LOWEST: + * + * Only check the lowest hole (the hole with the smallest address) and + * insert the node at the bottom of the hole or report -ENOSPC if + * unsuitable. + * + * Does not search all holes. + */ + DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE, +}; + +/** + * struct drm_mm_node - allocated block in the DRM allocator + * + * This represents an allocated block in a &drm_mm allocator. 
Except for
+ * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
+ * entirely opaque and should only be accessed through the provided functions.
+ * Since allocation of these nodes is entirely handled by the driver, they can
+ * be embedded.
+ */
+struct drm_mm_node {
+ /** @color: Opaque driver-private tag. */
+ unsigned long color;
+ /** @start: Start address of the allocated block. */
+ u64 start;
+ /** @size: Size of the allocated block. */
+ u64 size;
+ /* private: */
+ struct drm_mm *mm;
+ struct list_head node_list;
+ struct list_head hole_stack;
+ struct rb_node rb;
+ struct rb_node rb_hole_size;
+ struct rb_node rb_hole_addr;
+ u64 __subtree_last;
+ u64 hole_size;
+ u64 subtree_max_hole;
+ unsigned long flags;
+#define DRM_MM_NODE_ALLOCATED_BIT 0
+#define DRM_MM_NODE_SCANNED_BIT 1
+#ifdef CONFIG_DRM_DEBUG_MM
+ depot_stack_handle_t stack;
+#endif
+};
+
+/**
+ * struct drm_mm - DRM allocator
+ *
+ * DRM range allocator with a few special functions and features geared towards
+ * managing GPU memory. Except for the @color_adjust callback the structure is
+ * entirely opaque and should only be accessed through the provided functions
+ * and macros. This structure can be embedded into larger driver structures.
+ */
+struct drm_mm {
+ /**
+ * @color_adjust:
+ *
+ * Optional driver callback to further apply restrictions on a hole. The
+ * node argument points at the node containing the hole from which the
+ * block would be allocated (see drm_mm_hole_follows() and friends). The
+ * other arguments are the size of the block to be allocated. The driver
+ * can adjust the start and end as needed to e.g. insert guard pages.
+ */
+ void (*color_adjust)(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start, u64 *end);
+
+ /* private: */
+ /* List of all memory nodes that immediately precede a free hole. */
+ struct list_head hole_stack;
+ /* head_node.node_list is the list of all memory nodes, ordered
+ * according to the (increasing) start address of the memory node. */
+ struct drm_mm_node head_node;
+ /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
+ struct rb_root_cached interval_tree;
+ struct rb_root_cached holes_size;
+ struct rb_root holes_addr;
+
+ unsigned long scan_active;
+};
+
+/**
+ * struct drm_mm_scan - DRM allocator eviction roster data
+ *
+ * This structure tracks data needed for the eviction roster set up using
+ * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
+ * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
+ * be accessed through the provided functions and macros. It is meant to be
+ * allocated temporarily by the driver on the stack.
+ */
+struct drm_mm_scan {
+ /* private: */
+ struct drm_mm *mm;
+
+ u64 size;
+ u64 alignment;
+ u64 remainder_mask;
+
+ u64 range_start;
+ u64 range_end;
+
+ u64 hit_start;
+ u64 hit_end;
+
+ unsigned long color;
+ enum drm_mm_insert_mode mode;
+};
+
+/**
+ * drm_mm_node_allocated - checks whether a node is allocated
+ * @node: drm_mm_node to check
+ *
+ * Drivers are required to clear a node prior to using it with the
+ * drm_mm range manager.
+ *
+ * Drivers should use this helper for proper encapsulation of drm_mm
+ * internals.
+ *
+ * Returns:
+ * True if the @node is allocated.
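+ *
+ * Example (editor's illustrative sketch, not part of the original header;
+ * the embedded "bo->vram_node" is a hypothetical driver field)::
+ *
+ *     if (drm_mm_node_allocated(&bo->vram_node))
+ *         drm_mm_remove_node(&bo->vram_node);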
+ */ +static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) +{ + return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); +} + +/** + * drm_mm_initialized - checks whether an allocator is initialized + * @mm: drm_mm to check + * + * Drivers should clear the struct drm_mm prior to initialisation if they + * want to use this function. + * + * Drivers should use this helper for proper encapsulation of drm_mm + * internals. + * + * Returns: + * True if the @mm is initialized. + */ +static inline bool drm_mm_initialized(const struct drm_mm *mm) +{ + return READ_ONCE(mm->hole_stack.next); +} + +/** + * drm_mm_hole_follows - checks whether a hole follows this node + * @node: drm_mm_node to check + * + * Holes are embedded into the drm_mm using the tail of a drm_mm_node. + * If you wish to know whether a hole follows this particular node, + * query this function. See also drm_mm_hole_node_start() and + * drm_mm_hole_node_end(). + * + * Returns: + * True if a hole follows the @node. + */ +static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) +{ + return node->hole_size; +} + +static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) +{ + return hole_node->start + hole_node->size; +} + +/** + * drm_mm_hole_node_start - computes the start of the hole following @node + * @hole_node: drm_mm_node which implicitly tracks the following hole + * + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows() + * + * Returns: + * Start of the subsequent hole. + */ +static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) +{ + DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); + return __drm_mm_hole_node_start(hole_node); +} + +static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) +{ + return list_next_entry(hole_node, node_list)->start; +} + +/** + * drm_mm_hole_node_end - computes the end of the hole following @node + * @hole_node: drm_mm_node which implicitly tracks the following hole + * + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows(). + * + * Returns: + * End of the subsequent hole. + */ +static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) +{ + return __drm_mm_hole_node_end(hole_node); +} + +/** + * drm_mm_nodes - list of nodes under the drm_mm range manager + * @mm: the struct drm_mm range manager + * + * As the drm_mm range manager hides its node_list deep with its + * structure, extracting it looks painful and repetitive. This is + * not expected to be used outside of the drm_mm_for_each_node() + * macros and similar internal functions. + * + * Returns: + * The node list, may be empty. + */ +#define drm_mm_nodes(mm) (&(mm)->head_node.node_list) + +/** + * drm_mm_for_each_node - iterator to walk over all allocated nodes + * @entry: &struct drm_mm_node to assign to in each iteration step + * @mm: &drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each(), so not save against removal of elements. 
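+ *
+ * Example (editor's illustrative sketch, not part of the original header;
+ * "mm" is assumed to be an already initialised &drm_mm)::
+ *
+ *     struct drm_mm_node *entry;
+ *
+ *     drm_mm_for_each_node(entry, mm)
+ *         pr_info("node [%llx + %llx]\n", entry->start, entry->size);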
+ */ +#define drm_mm_for_each_node(entry, mm) \ + list_for_each_entry(entry, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes + * @entry: &struct drm_mm_node to assign to in each iteration step + * @next: &struct drm_mm_node to store the next step + * @mm: &drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each_safe(), so save against removal of elements. + */ +#define drm_mm_for_each_node_safe(entry, next, mm) \ + list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_hole - iterator to walk over all holes + * @pos: &drm_mm_node used internally to track progress + * @mm: &drm_mm allocator to walk + * @hole_start: ulong variable to assign the hole start to on each iteration + * @hole_end: ulong variable to assign the hole end to on each iteration + * + * This iterator walks over all holes in the range allocator. It is implemented + * with list_for_each(), so not save against removal of elements. @entry is used + * internally and will not reflect a real drm_mm_node for the very first hole. + * Hence users of this iterator may not access it. + * + * Implementation Note: + * We need to inline list_for_each_entry in order to be able to set hole_start + * and hole_end on each iteration while keeping the macro sane. + */ +#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \ + for (pos = list_first_entry(&(mm)->hole_stack, \ + typeof(*pos), hole_stack); \ + &pos->hole_stack != &(mm)->hole_stack ? \ + hole_start = drm_mm_hole_node_start(pos), \ + hole_end = hole_start + pos->hole_size, \ + 1 : 0; \ + pos = list_next_entry(pos, hole_stack)) + +/* + * Basic range manager support (drm_mm.c) + */ +int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); +int drm_mm_insert_node_in_range(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size, + u64 alignment, + unsigned long color, + u64 start, + u64 end, + enum drm_mm_insert_mode mode); + +/** + * drm_mm_insert_node_generic - search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocate node to insert + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for this node + * @mode: fine-tune the allocation search and placement + * + * This is a simplified version of drm_mm_insert_node_in_range() with no + * range restrictions applied. + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. + */ +static inline int +drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, + u64 size, u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) +{ + return drm_mm_insert_node_in_range(mm, node, + size, alignment, color, + 0, U64_MAX, mode); +} + +/** + * drm_mm_insert_node - search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocate node to insert + * @size: size of the allocation + * + * This is a simplified version of drm_mm_insert_node_generic() with @color set + * to 0. + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. 
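+ *
+ * Example (editor's illustrative sketch, not part of the original header;
+ * the "foo" structure is hypothetical and error handling is trimmed)::
+ *
+ *     int ret;
+ *
+ *     drm_mm_init(&foo->mm, 0, 64 * 1024 * 1024);
+ *
+ *     memset(&foo->node, 0, sizeof(foo->node));
+ *     ret = drm_mm_insert_node(&foo->mm, &foo->node, 4096);
+ *     if (ret)
+ *         return ret;
+ *
+ *     ...
+ *
+ *     drm_mm_remove_node(&foo->node);
+ *     drm_mm_takedown(&foo->mm);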
+ */ +static inline int drm_mm_insert_node(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size) +{ + return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); +} + +void drm_mm_remove_node(struct drm_mm_node *node); +void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); +void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); +void drm_mm_takedown(struct drm_mm *mm); + +/** + * drm_mm_clean - checks whether an allocator is clean + * @mm: drm_mm allocator to check + * + * Returns: + * True if the allocator is completely free, false if there's still a node + * allocated in it. + */ +static inline bool drm_mm_clean(const struct drm_mm *mm) +{ + return list_empty(drm_mm_nodes(mm)); +} + +struct drm_mm_node * +__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); + +/** + * drm_mm_for_each_node_in_range - iterator to walk over a range of + * allocated nodes + * @node__: drm_mm_node structure to assign to in each iteration step + * @mm__: drm_mm allocator to walk + * @start__: starting offset, the first node will overlap this + * @end__: ending offset, the last node will start before this (but may overlap) + * + * This iterator walks over all nodes in the range allocator that lie + * between @start and @end. It is implemented similarly to list_for_each(), + * but using the internal interval tree to accelerate the search for the + * starting node, and so not safe against removal of elements. It assumes + * that @end is within (or is the upper limit of) the drm_mm allocator. + * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk + * over the special _unallocated_ &drm_mm.head_node, and may even continue + * indefinitely. + */ +#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \ + for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \ + node__->start < (end__); \ + node__ = list_next_entry(node__, node_list)) + +void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, u64 alignment, unsigned long color, + u64 start, u64 end, + enum drm_mm_insert_mode mode); + +/** + * drm_mm_scan_init - initialize lru scanning + * @scan: scan state + * @mm: drm_mm to scan + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for the allocation + * @mode: fine-tune the allocation search and placement + * + * This is a simplified version of drm_mm_scan_init_with_range() with no range + * restrictions applied. + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. + * + * Warning: + * As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. 
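+ *
+ * Example (editor's heavily simplified sketch, not part of the original
+ * header; the object bookkeeping and the "foo" fields are hypothetical).
+ * Candidate nodes are added until a hole is found, then every scanned node
+ * must be removed again in the reverse order it was added (list_add()
+ * prepends, so walking scan_list forwards removes in reverse order); nodes
+ * for which drm_mm_scan_remove_block() returns true are the ones that have
+ * to be evicted to create the hole::
+ *
+ *     struct drm_mm_scan scan;
+ *     LIST_HEAD(scan_list);
+ *     LIST_HEAD(evict_list);
+ *     u64 size = 4096;
+ *     bool found = false;
+ *
+ *     drm_mm_scan_init(&scan, &foo->mm, size, 0, 0, DRM_MM_INSERT_LOW);
+ *     list_for_each_entry(obj, &foo->lru, lru_link) {
+ *         list_add(&obj->scan_link, &scan_list);
+ *         found = drm_mm_scan_add_block(&scan, &obj->node);
+ *         if (found)
+ *             break;
+ *     }
+ *
+ *     list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
+ *         if (drm_mm_scan_remove_block(&scan, &obj->node))
+ *             list_move(&obj->scan_link, &evict_list);
+ *         else
+ *             list_del(&obj->scan_link);
+ *     }
+ *
+ * Only once the scan list is empty may everything on the hypothetical
+ * evict_list actually be evicted, after which the new node can be allocated
+ * from the resulting hole with DRM_MM_INSERT_EVICT.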
+ */ +static inline void drm_mm_scan_init(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, + u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) +{ + drm_mm_scan_init_with_range(scan, mm, + size, alignment, color, + 0, U64_MAX, mode); +} + +bool drm_mm_scan_add_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); + +void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p); + +#endif diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h new file mode 100644 index 000000000..a18f73eb3 --- /dev/null +++ b/include/drm/drm_mode_config.h @@ -0,0 +1,970 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_MODE_CONFIG_H__ +#define __DRM_MODE_CONFIG_H__ + +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/idr.h> +#include <linux/workqueue.h> +#include <linux/llist.h> + +#include <drm/drm_modeset_lock.h> + +struct drm_file; +struct drm_device; +struct drm_atomic_state; +struct drm_mode_fb_cmd2; +struct drm_format_info; +struct drm_display_mode; + +/** + * struct drm_mode_config_funcs - basic driver provided mode setting functions + * + * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that + * involve drivers. + */ +struct drm_mode_config_funcs { + /** + * @fb_create: + * + * Create a new framebuffer object. The core does basic checks on the + * requested metadata, but most of that is left to the driver. See + * &struct drm_mode_fb_cmd2 for details. + * + * To validate the pixel format and modifier drivers can use + * drm_any_plane_has_format() to make sure at least one plane supports + * the requested values. Note that the driver must first determine the + * actual modifier used if the request doesn't have it specified, + * ie. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0. + * + * If the parameters are deemed valid and the backing storage objects in + * the underlying memory manager all exist, then the driver allocates + * a new &drm_framebuffer structure, subclassed to contain + * driver-specific information (like the internal native buffer object + * references). 
It also needs to fill out all relevant metadata, which + * should be done by calling drm_helper_mode_fill_fb_struct(). + * + * The initialization is finalized by calling drm_framebuffer_init(), + * which registers the framebuffer and makes it accessible to other + * threads. + * + * RETURNS: + * + * A new framebuffer with an initial reference count of 1 or a negative + * error code encoded with ERR_PTR(). + */ + struct drm_framebuffer *(*fb_create)(struct drm_device *dev, + struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @get_format_info: + * + * Allows a driver to return custom format information for special + * fb layouts (eg. ones with auxiliary compression control planes). + * + * RETURNS: + * + * The format information specific to the given fb metadata, or + * NULL if none is found. + */ + const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @output_poll_changed: + * + * Callback used by helpers to inform the driver of output configuration + * changes. + * + * Drivers implementing fbdev emulation with the helpers can call + * drm_fb_helper_hotplug_changed from this hook to inform the fbdev + * helper of output changes. + * + * FIXME: + * + * Except that there's no vtable for device-level helper callbacks + * there's no reason this is a core function. + */ + void (*output_poll_changed)(struct drm_device *dev); + + /** + * @mode_valid: + * + * Device specific validation of display modes. Can be used to reject + * modes that can never be supported. Only device wide constraints can + * be checked here. crtc/encoder/bridge/connector specific constraints + * should be checked in the .mode_valid() hook for each specific object. + */ + enum drm_mode_status (*mode_valid)(struct drm_device *dev, + const struct drm_display_mode *mode); + + /** + * @atomic_check: + * + * This is the only hook to validate an atomic modeset update. This + * function must reject any modeset and state changes which the hardware + * or driver doesn't support. This includes but is of course not limited + * to: + * + * - Checking that the modes, framebuffers, scaling and placement + * requirements and so on are within the limits of the hardware. + * + * - Checking that any hidden shared resources are not oversubscribed. + * This can be shared PLLs, shared lanes, overall memory bandwidth, + * display fifo space (where shared between planes or maybe even + * CRTCs). + * + * - Checking that virtualized resources exported to userspace are not + * oversubscribed. For various reasons it can make sense to expose + * more planes, crtcs or encoders than which are physically there. One + * example is dual-pipe operations (which generally should be hidden + * from userspace if when lockstepped in hardware, exposed otherwise), + * where a plane might need 1 hardware plane (if it's just on one + * pipe), 2 hardware planes (when it spans both pipes) or maybe even + * shared a hardware plane with a 2nd plane (if there's a compatible + * plane requested on the area handled by the other pipe). + * + * - Check that any transitional state is possible and that if + * requested, the update can indeed be done in the vblank period + * without temporarily disabling some functions. + * + * - Check any other constraints the driver or hardware might have. 
+ * + * - This callback also needs to correctly fill out the &drm_crtc_state + * in this update to make sure that drm_atomic_crtc_needs_modeset() + * reflects the nature of the possible update and returns true if and + * only if the update cannot be applied without tearing within one + * vblank on that CRTC. The core uses that information to reject + * updates which require a full modeset (i.e. blanking the screen, or + * at least pausing updates for a substantial amount of time) if + * userspace has disallowed that in its request. + * + * - The driver also does not need to repeat basic input validation + * like done for the corresponding legacy entry points. The core does + * that before calling this hook. + * + * See the documentation of @atomic_commit for an exhaustive list of + * error conditions which don't have to be checked at the in this + * callback. + * + * See the documentation for &struct drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_check(), or one of the exported sub-functions of + * it. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EINVAL, if any of the above constraints are violated. + * + * - -EDEADLK, when returned from an attempt to acquire an additional + * &drm_modeset_lock through drm_modeset_lock(). + * + * - -ENOMEM, if allocating additional state sub-structures failed due + * to lack of memory. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point all errors are + * treated equally. + */ + int (*atomic_check)(struct drm_device *dev, + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This is the only hook to commit an atomic modeset update. The core + * guarantees that @atomic_check has been called successfully before + * calling this function, and that nothing has been changed in the + * interim. + * + * See the documentation for &struct drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_commit(), or one of the exported sub-functions of + * it. + * + * Nonblocking commits (as indicated with the nonblock parameter) must + * do any preparatory work which might result in an unsuccessful commit + * in the context of this callback. The only exceptions are hardware + * errors resulting in -EIO. But even in that case the driver must + * ensure that the display pipe is at least running, to avoid + * compositors crashing when pageflips don't work. Anything else, + * specifically committing the update to the hardware, should be done + * without blocking the caller. For updates which do not require a + * modeset this must be guaranteed. + * + * The driver must wait for any pending rendering to the new + * framebuffers to complete before executing the flip. It should also + * wait for any pending rendering from other drivers if the underlying + * buffer is a shared dma-buf. Nonblocking commits must not wait for + * rendering in the context of this callback. + * + * An application can request to be notified when the atomic commit has + * completed. These events are per-CRTC and can be distinguished by the + * CRTC index supplied in &drm_event to userspace. 
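+ *
+ * As an editor's illustrative sketch (not part of the original header,
+ * with the locking context simplified), a driver that has finished
+ * applying an update might complete such an event like this::
+ *
+ *     if (crtc->state->event) {
+ *         spin_lock_irq(&crtc->dev->event_lock);
+ *         drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ *         spin_unlock_irq(&crtc->dev->event_lock);
+ *         crtc->state->event = NULL;
+ *     }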
+ * + * The drm core will supply a &struct drm_event in each CRTC's + * &drm_crtc_state.event. See the documentation for + * &drm_crtc_state.event for more details about the precise semantics of + * this event. + * + * NOTE: + * + * Drivers are not allowed to shut down any display pipe successfully + * enabled through an atomic commit on their own. Doing so can result in + * compositors crashing if a page flip is suddenly rejected because the + * pipe is off. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EBUSY, if a nonblocking updated is requested and there is + * an earlier updated pending. Drivers are allowed to support a queue + * of outstanding updates, but currently no driver supports that. + * Note that drivers must wait for preceding updates to complete if a + * synchronous update is requested, they are not allowed to fail the + * commit in that case. + * + * - -ENOMEM, if the driver failed to allocate memory. Specifically + * this can happen when trying to pin framebuffers, which must only + * be done when committing the state. + * + * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate + * that the driver has run out of vram, iommu space or similar GPU + * address space needed for framebuffer. + * + * - -EIO, if the hardware completely died. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point of view all errors are + * treated equally. + * + * This list is exhaustive. Specifically this hook is not allowed to + * return -EINVAL (any invalid requests should be caught in + * @atomic_check) or -EDEADLK (this function must not acquire + * additional modeset locks). + */ + int (*atomic_commit)(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock); + + /** + * @atomic_state_alloc: + * + * This optional hook can be used by drivers that want to subclass struct + * &drm_atomic_state to be able to track their own driver-private global + * state easily. If this hook is implemented, drivers must also + * implement @atomic_state_clear and @atomic_state_free. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + * + * RETURNS: + * + * A new &drm_atomic_state on success or NULL on failure. + */ + struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); + + /** + * @atomic_state_clear: + * + * This hook must clear any driver private state duplicated into the + * passed-in &drm_atomic_state. This hook is called when the caller + * encountered a &drm_modeset_lock deadlock and needs to drop all + * already acquired locks as part of the deadlock avoidance dance + * implemented in drm_modeset_backoff(). + * + * Any duplicated state must be invalidated since a concurrent atomic + * update might change it, and the drm atomic interfaces always apply + * updates as relative changes to the current state. + * + * Drivers that implement this must call drm_atomic_state_default_clear() + * to clear common state. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + */ + void (*atomic_state_clear)(struct drm_atomic_state *state); + + /** + * @atomic_state_free: + * + * This hook needs driver private resources and the &drm_atomic_state + * itself. 
Note that the core first calls drm_atomic_state_clear() to
+ * avoid code duplication between the clear and free hooks.
+ *
+ * Drivers that implement this must call
+ * drm_atomic_state_default_release() to release common resources.
+ *
+ * Subclassing of &drm_atomic_state is deprecated in favour of using
+ * &drm_private_state and &drm_private_obj.
+ */
+ void (*atomic_state_free)(struct drm_atomic_state *state);
+};
+
+/**
+ * struct drm_mode_config - Mode configuration control structure
+ * @min_width: minimum fb pixel width on this device
+ * @min_height: minimum fb pixel height on this device
+ * @max_width: maximum fb pixel width on this device
+ * @max_height: maximum fb pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
+ * @delayed_event: track delayed poll uevent delivery for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
+ * @helper_private: mid-layer private data
+ *
+ * Core mode resource tracking structure. All CRTCs, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
+ */
+struct drm_mode_config {
+ /**
+ * @mutex:
+ *
+ * This is the big scary modeset BKL which protects everything that
+ * isn't protected otherwise. Scope is unclear and fuzzy, try to remove
+ * anything from under its protection and move it into more well-scoped
+ * locks.
+ *
+ * The one important thing this protects is the use of @acquire_ctx.
+ */
+ struct mutex mutex;
+
+ /**
+ * @connection_mutex:
+ *
+ * This protects connector state and the connector to encoder to CRTC
+ * routing chain.
+ *
+ * For atomic drivers specifically this protects &drm_connector.state.
+ */
+ struct drm_modeset_lock connection_mutex;
+
+ /**
+ * @acquire_ctx:
+ *
+ * Global implicit acquire context used by atomic drivers for legacy
+ * IOCTLs. Deprecated, since implicit locking contexts make it
+ * impossible to use driver-private &struct drm_modeset_lock. Users of
+ * this must hold @mutex.
+ */
+ struct drm_modeset_acquire_ctx *acquire_ctx;
+
+ /**
+ * @idr_mutex:
+ *
+ * Mutex for KMS ID allocation and management. Protects both @object_idr
+ * and @tile_idr.
+ */
+ struct mutex idr_mutex;
+
+ /**
+ * @object_idr:
+ *
+ * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
+ * connector, modes - just makes life easier to have only one.
+ */
+ struct idr object_idr;
+
+ /**
+ * @tile_idr:
+ *
+ * Use this idr for allocating new IDs for tiled sinks, such as those
+ * used in some high-res DP MST screens.
+ */
+ struct idr tile_idr;
+
+ /** @fb_lock: Mutex to protect the global @fb_list and @num_fb. */
+ struct mutex fb_lock;
+ /** @num_fb: Number of entries on @fb_list. */
+ int num_fb;
+ /** @fb_list: List of all &struct drm_framebuffer. */
+ struct list_head fb_list;
+
+ /**
+ * @connector_list_lock: Protects @num_connector and
+ * @connector_list and @connector_free_list.
+ */
+ spinlock_t connector_list_lock;
+ /**
+ * @num_connector: Number of connectors on this device. Protected by
+ * @connector_list_lock.
+ */
+ int num_connector;
+ /**
+ * @connector_ida: ID allocator for connector indices.
+ */
+ struct ida connector_ida;
+ /**
+ * @connector_list:
+ *
+ * List of connector objects linked with &drm_connector.head. Protected
+ * by @connector_list_lock. Only use drm_for_each_connector_iter() and
+ * &struct drm_connector_list_iter to walk this list.
+ */
+ struct list_head connector_list;
+ /**
+ * @connector_free_list:
+ *
+ * List of connector objects linked with &drm_connector.free_head.
+ * Protected by @connector_list_lock. Used by
+ * drm_for_each_connector_iter() and
+ * &struct drm_connector_list_iter to safely free connectors using
+ * @connector_free_work.
+ */
+ struct llist_head connector_free_list;
+ /**
+ * @connector_free_work: Work to clean up @connector_free_list.
+ */
+ struct work_struct connector_free_work;
+
+ /**
+ * @num_encoder:
+ *
+ * Number of encoders on this device. This is invariant over the
+ * lifetime of a device and hence doesn't need any locks.
+ */
+ int num_encoder;
+ /**
+ * @encoder_list:
+ *
+ * List of encoder objects linked with &drm_encoder.head. This is
+ * invariant over the lifetime of a device and hence doesn't need any
+ * locks.
+ */
+ struct list_head encoder_list;
+
+ /**
+ * @num_total_plane:
+ *
+ * Number of universal (i.e. with primary/cursor) planes on this device.
+ * This is invariant over the lifetime of a device and hence doesn't
+ * need any locks.
+ */
+ int num_total_plane;
+ /**
+ * @plane_list:
+ *
+ * List of plane objects linked with &drm_plane.head. This is invariant
+ * over the lifetime of a device and hence doesn't need any locks.
+ */
+ struct list_head plane_list;
+
+ /**
+ * @num_crtc:
+ *
+ * Number of CRTCs on this device. This is invariant over the lifetime
+ * of a device and hence doesn't need any locks.
+ */
+ int num_crtc;
+ /**
+ * @crtc_list:
+ *
+ * List of CRTC objects linked with &drm_crtc.head. This is invariant
+ * over the lifetime of a device and hence doesn't need any locks.
+ */
+ struct list_head crtc_list;
+
+ /**
+ * @property_list:
+ *
+ * List of property type objects linked with &drm_property.head. This is
+ * invariant over the lifetime of a device and hence doesn't need any
+ * locks.
+ */
+ struct list_head property_list;
+
+ /**
+ * @privobj_list:
+ *
+ * List of private objects linked with &drm_private_obj.head. This is
+ * invariant over the lifetime of a device and hence doesn't need any
+ * locks.
+ */
+ struct list_head privobj_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ const struct drm_mode_config_funcs *funcs;
+ resource_size_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ bool poll_running;
+ bool delayed_event;
+ struct delayed_work output_poll_work;
+
+ /**
+ * @blob_lock:
+ *
+ * Mutex for blob property allocation and management, protects
+ * @property_blob_list and &drm_file.blobs.
+ */
+ struct mutex blob_lock;
+
+ /**
+ * @property_blob_list:
+ *
+ * List of all the blob property objects linked with
+ * &drm_property_blob.head. Protected by @blob_lock.
+ */
+ struct list_head property_blob_list;
+
+ /* pointers to standard properties */
+
+ /**
+ * @edid_property: Default connector property to hold the EDID of the
+ * currently connected sink, if any.
+ */
+ struct drm_property *edid_property;
+ /**
+ * @dpms_property: Default connector property to control the
+ * connector's DPMS state.
+ */
+ struct drm_property *dpms_property;
+ /**
+ * @path_property: Default connector property to hold the DP MST path
+ * for the port.
+ */
+ struct drm_property *path_property;
+ /**
+ * @tile_property: Default connector property to store the tile
+ * position of a tiled screen, for sinks which need to be driven with
+ * multiple CRTCs.
+ */
+ struct drm_property *tile_property;
+ /**
+ * @link_status_property: Default connector property for link status
+ * of a connector
+ */
+ struct drm_property *link_status_property;
+ /**
+ * @plane_type_property: Default plane property to differentiate
+ * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
+ */
+ struct drm_property *plane_type_property;
+ /**
+ * @prop_src_x: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_x;
+ /**
+ * @prop_src_y: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_y;
+ /**
+ * @prop_src_w: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_w;
+ /**
+ * @prop_src_h: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
+ struct drm_property *prop_src_h;
+ /**
+ * @prop_crtc_x: Default atomic plane property for the plane destination
+ * position in the &drm_crtc the plane is being shown on.
+ */
+ struct drm_property *prop_crtc_x;
+ /**
+ * @prop_crtc_y: Default atomic plane property for the plane destination
+ * position in the &drm_crtc the plane is being shown on.
+ */
+ struct drm_property *prop_crtc_y;
+ /**
+ * @prop_crtc_w: Default atomic plane property for the plane destination
+ * position in the &drm_crtc the plane is being shown on.
+ */
+ struct drm_property *prop_crtc_w;
+ /**
+ * @prop_crtc_h: Default atomic plane property for the plane destination
+ * position in the &drm_crtc the plane is being shown on.
+ */
+ struct drm_property *prop_crtc_h;
+ /**
+ * @prop_fb_id: Default atomic plane property to specify the
+ * &drm_framebuffer.
+ */
+ struct drm_property *prop_fb_id;
+ /**
+ * @prop_in_fence_fd: Sync File fd representing the incoming fences
+ * for a Plane.
+ */
+ struct drm_property *prop_in_fence_fd;
+ /**
+ * @prop_out_fence_ptr: Sync File fd pointer representing the
+ * outgoing fences for a CRTC. Userspace should provide a pointer to a
+ * value of type s32, and then cast that pointer to u64.
+ */
+ struct drm_property *prop_out_fence_ptr;
+ /**
+ * @prop_crtc_id: Default atomic plane property to specify the
+ * &drm_crtc.
+ */
+ struct drm_property *prop_crtc_id;
+ /**
+ * @prop_fb_damage_clips: Optional plane property to mark damaged
+ * regions on the plane in framebuffer coordinates of the framebuffer
+ * attached to the plane.
+ *
+ * The layout of blob data is simply an array of &drm_mode_rect. Unlike
+ * plane src coordinates, damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property *prop_fb_damage_clips;
+ /**
+ * @prop_active: Default atomic CRTC property to control the active
+ * state, which is the simplified implementation for DPMS in atomic
+ * drivers.
+ */
+ struct drm_property *prop_active;
+ /**
+ * @prop_mode_id: Default atomic CRTC property to set the mode for a
+ * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+ * connectors must be off and active must be set to false, too.
+ */
+ struct drm_property *prop_mode_id;
+ /**
+ * @prop_vrr_enabled: Default atomic CRTC property to indicate
+ * whether variable refresh rate should be enabled on the CRTC.
+ */
+ struct drm_property *prop_vrr_enabled;
+
+ /**
+ * @dvi_i_subconnector_property: Optional DVI-I property to
+ * differentiate between analog or digital mode.
+ */
+ struct drm_property *dvi_i_subconnector_property;
+ /**
+ * @dvi_i_select_subconnector_property: Optional DVI-I property to
+ * select between analog or digital mode.
+ */
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /**
+ * @dp_subconnector_property: Optional DP property to differentiate
+ * between different DP downstream port types.
+ */
+ struct drm_property *dp_subconnector_property;
+
+ /**
+ * @tv_subconnector_property: Optional TV property to differentiate
+ * between different TV connector types.
+ */
+ struct drm_property *tv_subconnector_property;
+ /**
+ * @tv_select_subconnector_property: Optional TV property to select
+ * between different TV connector types.
+ */
+ struct drm_property *tv_select_subconnector_property;
+ /**
+ * @tv_mode_property: Optional TV property to select
+ * the output TV mode.
+ */
+ struct drm_property *tv_mode_property;
+ /**
+ * @tv_left_margin_property: Optional TV property to set the left
+ * margin (expressed in pixels).
+ */
+ struct drm_property *tv_left_margin_property;
+ /**
+ * @tv_right_margin_property: Optional TV property to set the right
+ * margin (expressed in pixels).
+ */
+ struct drm_property *tv_right_margin_property;
+ /**
+ * @tv_top_margin_property: Optional TV property to set the top
+ * margin (expressed in pixels).
+ */
+ struct drm_property *tv_top_margin_property;
+ /**
+ * @tv_bottom_margin_property: Optional TV property to set the bottom
+ * margin (expressed in pixels).
+ */
+ struct drm_property *tv_bottom_margin_property;
+ /**
+ * @tv_brightness_property: Optional TV property to set the
+ * brightness.
+ */
+ struct drm_property *tv_brightness_property;
+ /**
+ * @tv_contrast_property: Optional TV property to set the
+ * contrast.
+ */
+ struct drm_property *tv_contrast_property;
+ /**
+ * @tv_flicker_reduction_property: Optional TV property to control the
+ * flicker reduction mode.
+ */
+ struct drm_property *tv_flicker_reduction_property;
+ /**
+ * @tv_overscan_property: Optional TV property to control the overscan
+ * setting.
+ */
+ struct drm_property *tv_overscan_property;
+ /**
+ * @tv_saturation_property: Optional TV property to set the
+ * saturation.
+ */
+ struct drm_property *tv_saturation_property;
+ /**
+ * @tv_hue_property: Optional TV property to set the hue.
+ */
+ struct drm_property *tv_hue_property;
+
+ /**
+ * @scaling_mode_property: Optional connector property to control the
+ * upscaling, mostly used for built-in panels.
+ */
+ struct drm_property *scaling_mode_property;
+ /**
+ * @aspect_ratio_property: Optional connector property to control the
+ * HDMI infoframe aspect ratio setting.
+ */
+ struct drm_property *aspect_ratio_property;
+ /**
+ * @content_type_property: Optional connector property to control the
+ * HDMI infoframe content type setting.
+ */
+ struct drm_property *content_type_property;
+ /**
+ * @degamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the framebuffer's colors to linear gamma.
+ */
+ struct drm_property *degamma_lut_property;
+ /**
+ * @degamma_lut_size_property: Optional CRTC property for the size of
+ * the degamma LUT as supported by the driver (read-only).
+ */ + struct drm_property *degamma_lut_size_property; + /** + * @ctm_property: Optional CRTC property to set the + * matrix used to convert colors after the lookup in the + * degamma LUT. + */ + struct drm_property *ctm_property; + /** + * @gamma_lut_property: Optional CRTC property to set the LUT used to + * convert the colors, after the CTM matrix, to the gamma space of the + * connected screen. + */ + struct drm_property *gamma_lut_property; + /** + * @gamma_lut_size_property: Optional CRTC property for the size of the + * gamma LUT as supported by the driver (read-only). + */ + struct drm_property *gamma_lut_size_property; + + /** + * @suggested_x_property: Optional connector property with a hint for + * the position of the output on the host's screen. + */ + struct drm_property *suggested_x_property; + /** + * @suggested_y_property: Optional connector property with a hint for + * the position of the output on the host's screen. + */ + struct drm_property *suggested_y_property; + + /** + * @non_desktop_property: Optional connector property with a hint + * that device isn't a standard display, and the console/desktop, + * should not be displayed on it. + */ + struct drm_property *non_desktop_property; + + /** + * @panel_orientation_property: Optional connector property indicating + * how the lcd-panel is mounted inside the casing (e.g. normal or + * upside-down). + */ + struct drm_property *panel_orientation_property; + + /** + * @writeback_fb_id_property: Property for writeback connectors, storing + * the ID of the output framebuffer. + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_fb_id_property; + + /** + * @writeback_pixel_formats_property: Property for writeback connectors, + * storing an array of the supported pixel formats for the writeback + * engine (read-only). + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_pixel_formats_property; + /** + * @writeback_out_fence_ptr_property: Property for writeback connectors, + * fd pointer representing the outgoing fences for a writeback + * connector. Userspace should provide a pointer to a value of type s32, + * and then cast that pointer to u64. + * See also: drm_writeback_connector_init() + */ + struct drm_property *writeback_out_fence_ptr_property; + + /** + * @hdr_output_metadata_property: Connector property containing hdr + * metatada. This will be provided by userspace compositors based + * on HDR content + */ + struct drm_property *hdr_output_metadata_property; + + /** + * @content_protection_property: DRM ENUM property for content + * protection. See drm_connector_attach_content_protection_property(). + */ + struct drm_property *content_protection_property; + + /** + * @hdcp_content_type_property: DRM ENUM property for type of + * Protected Content. + */ + struct drm_property *hdcp_content_type_property; + + /* dumb ioctl parameters */ + uint32_t preferred_depth, prefer_shadow; + + /** + * @prefer_shadow_fbdev: + * + * Hint to framebuffer emulation to prefer shadow-fb rendering. + */ + bool prefer_shadow_fbdev; + + /** + * @fbdev_use_iomem: + * + * Set to true if framebuffer reside in iomem. + * When set to true memcpy_toio() is used when copying the framebuffer in + * drm_fb_helper.drm_fb_helper_dirty_blit_real(). + * + * FIXME: This should be replaced with a per-mapping is_iomem + * flag (like ttm does), and then used everywhere in fbdev code. 
+ */ + bool fbdev_use_iomem; + + /** + * @quirk_addfb_prefer_xbgr_30bpp: + * + * Special hack for legacy ADDFB to keep nouveau userspace happy. Should + * only ever be set by the nouveau kernel driver. + */ + bool quirk_addfb_prefer_xbgr_30bpp; + + /** + * @quirk_addfb_prefer_host_byte_order: + * + * When set to true drm_mode_addfb() will pick host byte order + * pixel_format when calling drm_mode_addfb2(). This is how + * drm_mode_addfb() should have worked from day one. It + * didn't though, so we ended up with quirks in both kernel + * and userspace drivers to deal with the broken behavior. + * Simply fixing drm_mode_addfb() unconditionally would break + * these drivers, so add a quirk bit here to allow drivers + * opt-in. + */ + bool quirk_addfb_prefer_host_byte_order; + + /** + * @async_page_flip: Does this device support async flips on the primary + * plane? + */ + bool async_page_flip; + + /** + * @allow_fb_modifiers: + * + * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call. + */ + bool allow_fb_modifiers; + + /** + * @normalize_zpos: + * + * If true the drm core will call drm_atomic_normalize_zpos() as part of + * atomic mode checking from drm_atomic_helper_check() + */ + bool normalize_zpos; + + /** + * @modifiers_property: Plane property to list support modifier/format + * combination. + */ + struct drm_property *modifiers_property; + + /* cursor size */ + uint32_t cursor_width, cursor_height; + + /** + * @suspend_state: + * + * Atomic state when suspended. + * Set by drm_mode_config_helper_suspend() and cleared by + * drm_mode_config_helper_resume(). + */ + struct drm_atomic_state *suspend_state; + + const struct drm_mode_config_helper_funcs *helper_private; +}; + +int __must_check drmm_mode_config_init(struct drm_device *dev); + +/** + * drm_mode_config_init - DRM mode_configuration structure initialization + * @dev: DRM device + * + * This is the unmanaged version of drmm_mode_config_init() for drivers which + * still explicitly call drm_mode_config_cleanup(). + * + * FIXME: This function is deprecated and drivers should be converted over to + * drmm_mode_config_init(). + */ +static inline int drm_mode_config_init(struct drm_device *dev) +{ + return drmm_mode_config_init(dev); +} + +void drm_mode_config_reset(struct drm_device *dev); +void drm_mode_config_cleanup(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h new file mode 100644 index 000000000..c34a3e803 --- /dev/null +++ b/include/drm/drm_mode_object.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_MODESET_H__ +#define __DRM_MODESET_H__ + +#include <linux/kref.h> +#include <drm/drm_lease.h> +struct drm_object_properties; +struct drm_property; +struct drm_device; +struct drm_file; + +/** + * struct drm_mode_object - base structure for modeset objects + * @id: userspace visible identifier + * @type: type of the object, one of DRM_MODE_OBJECT\_\* + * @properties: properties attached to this object, including values + * @refcount: reference count for objects which with dynamic lifetime + * @free_cb: free function callback, only set for objects with dynamic lifetime + * + * Base structure for modeset objects visible to userspace. Objects can be + * looked up using drm_mode_object_find(). Besides basic uapi interface + * properties like @id and @type it provides two services: + * + * - It tracks attached properties and their values. This is used by &drm_crtc, + * &drm_plane and &drm_connector. Properties are attached by calling + * drm_object_attach_property() before the object is visible to userspace. + * + * - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it + * provides reference counting through drm_mode_object_get() and + * drm_mode_object_put(). This is used by &drm_framebuffer, &drm_connector + * and &drm_property_blob. These objects provide specialized reference + * counting wrappers. + */ +struct drm_mode_object { + uint32_t id; + uint32_t type; + struct drm_object_properties *properties; + struct kref refcount; + void (*free_cb)(struct kref *kref); +}; + +#define DRM_OBJECT_MAX_PROPERTY 24 +/** + * struct drm_object_properties - property tracking for &drm_mode_object + */ +struct drm_object_properties { + /** + * @count: number of valid properties, must be less than or equal to + * DRM_OBJECT_MAX_PROPERTY. + */ + + int count; + /** + * @properties: Array of pointers to &drm_property. + * + * NOTE: if we ever start dynamically destroying properties (ie. + * not at drm_mode_config_cleanup() time), then we'd have to do + * a better job of detaching property from mode objects to avoid + * dangling property pointers: + */ + struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY]; + + /** + * @values: Array to store the property values, matching @properties. Do + * not read/write values directly, but use + * drm_object_property_get_value() and drm_object_property_set_value(). + * + * Note that atomic drivers do not store mutable properties in this + * array, but only the decoded values in the corresponding state + * structure. The decoding is done using the &drm_crtc.atomic_get_property and + * &drm_crtc.atomic_set_property hooks for &struct drm_crtc. For + * &struct drm_plane the hooks are &drm_plane_funcs.atomic_get_property and + * &drm_plane_funcs.atomic_set_property. And for &struct drm_connector + * the hooks are &drm_connector_funcs.atomic_get_property and + * &drm_connector_funcs.atomic_set_property . + * + * Hence atomic drivers should not use drm_object_property_set_value() + * and drm_object_property_get_value() on mutable objects, i.e. 
those + * without the DRM_MODE_PROP_IMMUTABLE flag set. + */ + uint64_t values[DRM_OBJECT_MAX_PROPERTY]; +}; + +/* Avoid boilerplate. I'm tired of typing. */ +#define DRM_ENUM_NAME_FN(fnname, list) \ + const char *fnname(int val) \ + { \ + int i; \ + for (i = 0; i < ARRAY_SIZE(list); i++) { \ + if (list[i].type == val) \ + return list[i].name; \ + } \ + return "(unknown)"; \ + } + +struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id, uint32_t type); +void drm_mode_object_get(struct drm_mode_object *obj); +void drm_mode_object_put(struct drm_mode_object *obj); + +int drm_object_property_set_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t val); +int drm_object_property_get_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t *value); + +void drm_object_attach_property(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t init_val); + +bool drm_mode_object_lease_required(uint32_t type); +#endif diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h new file mode 100644 index 000000000..cdf2a299c --- /dev/null +++ b/include/drm/drm_modes.h @@ -0,0 +1,508 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * Copyright © 2014 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __DRM_MODES_H__ +#define __DRM_MODES_H__ + +#include <linux/hdmi.h> + +#include <drm/drm_mode_object.h> +#include <drm/drm_connector.h> + +struct videomode; + +/* + * Note on terminology: here, for brevity and convenience, we refer to connector + * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, + * DVI, etc. And 'screen' refers to the whole of the visible display, which + * may span multiple monitors (and therefore multiple CRTC and connector + * structures). 
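As a rough usage sketch for the property helpers declared above (the property itself and its initial value are made up), a driver attaches a property before the connector is registered and can read legacy/immutable values back through the same mode object:

static void foo_attach_example_prop(struct drm_connector *connector,
				    struct drm_property *prop)
{
	/* Must happen before the connector becomes visible to userspace. */
	drm_object_attach_property(&connector->base, prop, 0);
}

static u64 foo_read_example_prop(struct drm_connector *connector,
				 struct drm_property *prop)
{
	u64 val = 0;

	/* Atomic drivers decode mutable properties into state instead. */
	drm_object_property_get_value(&connector->base, prop, &val);
	return val;
}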
+ */ + +/** + * enum drm_mode_status - hardware support status of a mode + * @MODE_OK: Mode OK + * @MODE_HSYNC: hsync out of range + * @MODE_VSYNC: vsync out of range + * @MODE_H_ILLEGAL: mode has illegal horizontal timings + * @MODE_V_ILLEGAL: mode has illegal vertical timings + * @MODE_BAD_WIDTH: requires an unsupported linepitch + * @MODE_NOMODE: no mode with a matching name + * @MODE_NO_INTERLACE: interlaced mode not supported + * @MODE_NO_DBLESCAN: doublescan mode not supported + * @MODE_NO_VSCAN: multiscan mode not supported + * @MODE_MEM: insufficient video memory + * @MODE_VIRTUAL_X: mode width too large for specified virtual size + * @MODE_VIRTUAL_Y: mode height too large for specified virtual size + * @MODE_MEM_VIRT: insufficient video memory given virtual size + * @MODE_NOCLOCK: no fixed clock available + * @MODE_CLOCK_HIGH: clock required is too high + * @MODE_CLOCK_LOW: clock required is too low + * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange + * @MODE_BAD_HVALUE: horizontal timing was out of range + * @MODE_BAD_VVALUE: vertical timing was out of range + * @MODE_BAD_VSCAN: VScan value out of range + * @MODE_HSYNC_NARROW: horizontal sync too narrow + * @MODE_HSYNC_WIDE: horizontal sync too wide + * @MODE_HBLANK_NARROW: horizontal blanking too narrow + * @MODE_HBLANK_WIDE: horizontal blanking too wide + * @MODE_VSYNC_NARROW: vertical sync too narrow + * @MODE_VSYNC_WIDE: vertical sync too wide + * @MODE_VBLANK_NARROW: vertical blanking too narrow + * @MODE_VBLANK_WIDE: vertical blanking too wide + * @MODE_PANEL: exceeds panel dimensions + * @MODE_INTERLACE_WIDTH: width too large for interlaced mode + * @MODE_ONE_WIDTH: only one width is supported + * @MODE_ONE_HEIGHT: only one height is supported + * @MODE_ONE_SIZE: only one resolution is supported + * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking + * @MODE_NO_STEREO: stereo modes not supported + * @MODE_NO_420: ycbcr 420 modes not supported + * @MODE_STALE: mode has become stale + * @MODE_BAD: unspecified reason + * @MODE_ERROR: error condition + * + * This enum is used to filter out modes not supported by the driver/hardware + * combination. 
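A typical consumer of these status codes is a .mode_valid hook; a short sketch (the clock and size limits are invented) for a connector that cannot drive high pixel clocks:

/* Hypothetical scanout limits: 150 MHz pixel clock, 4096x4096 max. */
#define FOO_MAX_PIXEL_CLOCK_KHZ	150000

static enum drm_mode_status
foo_connector_mode_valid(struct drm_connector *connector,
			 struct drm_display_mode *mode)
{
	if (mode->clock > FOO_MAX_PIXEL_CLOCK_KHZ)
		return MODE_CLOCK_HIGH;
	if (mode->hdisplay > 4096 || mode->vdisplay > 4096)
		return MODE_BAD;

	return MODE_OK;
}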
+ */ +enum drm_mode_status { + MODE_OK = 0, + MODE_HSYNC, + MODE_VSYNC, + MODE_H_ILLEGAL, + MODE_V_ILLEGAL, + MODE_BAD_WIDTH, + MODE_NOMODE, + MODE_NO_INTERLACE, + MODE_NO_DBLESCAN, + MODE_NO_VSCAN, + MODE_MEM, + MODE_VIRTUAL_X, + MODE_VIRTUAL_Y, + MODE_MEM_VIRT, + MODE_NOCLOCK, + MODE_CLOCK_HIGH, + MODE_CLOCK_LOW, + MODE_CLOCK_RANGE, + MODE_BAD_HVALUE, + MODE_BAD_VVALUE, + MODE_BAD_VSCAN, + MODE_HSYNC_NARROW, + MODE_HSYNC_WIDE, + MODE_HBLANK_NARROW, + MODE_HBLANK_WIDE, + MODE_VSYNC_NARROW, + MODE_VSYNC_WIDE, + MODE_VBLANK_NARROW, + MODE_VBLANK_WIDE, + MODE_PANEL, + MODE_INTERLACE_WIDTH, + MODE_ONE_WIDTH, + MODE_ONE_HEIGHT, + MODE_ONE_SIZE, + MODE_NO_REDUCED, + MODE_NO_STEREO, + MODE_NO_420, + MODE_STALE = -3, + MODE_BAD = -2, + MODE_ERROR = -1 +}; + +#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ + .name = nm, .status = 0, .type = (t), .clock = (c), \ + .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ + .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ + .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ + .vscan = (vs), .flags = (f) + +/** + * DRM_SIMPLE_MODE - Simple display mode + * @hd: Horizontal resolution, width + * @vd: Vertical resolution, height + * @hd_mm: Display width in millimeters + * @vd_mm: Display height in millimeters + * + * This macro initializes a &drm_display_mode that only contains info about + * resolution and physical size. + */ +#define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \ + .type = DRM_MODE_TYPE_DRIVER, .clock = 1 /* pass validation */, \ + .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \ + .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \ + .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \ + .height_mm = (vd_mm) + +#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ +#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ +#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */ +#define CRTC_NO_VSCAN (1 << 3) /* don't adjust doublescan */ +#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN) + +#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF + +#define DRM_MODE_MATCH_TIMINGS (1 << 0) +#define DRM_MODE_MATCH_CLOCK (1 << 1) +#define DRM_MODE_MATCH_FLAGS (1 << 2) +#define DRM_MODE_MATCH_3D_FLAGS (1 << 3) +#define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4) + +/** + * struct drm_display_mode - DRM kernel-internal display mode structure + * @hdisplay: horizontal display size + * @hsync_start: horizontal sync start + * @hsync_end: horizontal sync end + * @htotal: horizontal total size + * @hskew: horizontal skew?! + * @vdisplay: vertical display size + * @vsync_start: vertical sync start + * @vsync_end: vertical sync end + * @vtotal: vertical total size + * @vscan: vertical scan?! + * @crtc_hdisplay: hardware mode horizontal display size + * @crtc_hblank_start: hardware mode horizontal blank start + * @crtc_hblank_end: hardware mode horizontal blank end + * @crtc_hsync_start: hardware mode horizontal sync start + * @crtc_hsync_end: hardware mode horizontal sync end + * @crtc_htotal: hardware mode horizontal total size + * @crtc_hskew: hardware mode horizontal skew?! 
+ * @crtc_vdisplay: hardware mode vertical display size + * @crtc_vblank_start: hardware mode vertical blank start + * @crtc_vblank_end: hardware mode vertical blank end + * @crtc_vsync_start: hardware mode vertical sync start + * @crtc_vsync_end: hardware mode vertical sync end + * @crtc_vtotal: hardware mode vertical total size + * + * The horizontal and vertical timings are defined per the following diagram. + * + * :: + * + * + * Active Front Sync Back + * Region Porch Porch + * <-----------------------><----------------><-------------><--------------> + * //////////////////////| + * ////////////////////// | + * ////////////////////// |.................. ................ + * _______________ + * <----- [hv]display -----> + * <------------- [hv]sync_start ------------> + * <--------------------- [hv]sync_end ---------------------> + * <-------------------------------- [hv]total ----------------------------->* + * + * This structure contains two copies of timings. First are the plain timings, + * which specify the logical mode, as it would be for a progressive 1:1 scanout + * at the refresh rate userspace can observe through vblank timestamps. Then + * there's the hardware timings, which are corrected for interlacing, + * double-clocking and similar things. They are provided as a convenience, and + * can be appropriately computed using drm_mode_set_crtcinfo(). + * + * For printing you can use %DRM_MODE_FMT and DRM_MODE_ARG(). + */ +struct drm_display_mode { + /** + * @clock: + * + * Pixel clock in kHz. + */ + int clock; /* in kHz */ + u16 hdisplay; + u16 hsync_start; + u16 hsync_end; + u16 htotal; + u16 hskew; + u16 vdisplay; + u16 vsync_start; + u16 vsync_end; + u16 vtotal; + u16 vscan; + /** + * @flags: + * + * Sync and timing flags: + * + * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. + * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. + * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. + * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. + * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. + * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. + * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. + * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. + * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. + * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). + * - DRM_MODE_FLAG_BCAST: <deprecated> + * - DRM_MODE_FLAG_PIXMUX: <deprecated> + * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. + * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. + * + * Additionally there's flags to specify how 3D modes are packed: + * + * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. + * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. + * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. + * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. + * - DRM_MODE_FLAG_3D_L_DEPTH: ? + * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? + * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom + * parts. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and + * right parts. + */ + u32 flags; + + /** + * @crtc_clock: + * + * Actual pixel or dot clock in the hardware. This differs from the + * logical @clock when e.g. using interlacing, double-clocking, stereo + * modes or other fancy stuff that changes the timings and signals + * actually sent over the wire. + * + * This is again in kHz. 
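To make the timing fields concrete, here is a sketch of a fixed 800x480 panel mode built with the DRM_MODE() initializer (all numbers are illustrative); the vertical refresh follows as clock * 1000 / (htotal * vtotal) = 33300000 / (1056 * 525), roughly 60 Hz:

static const struct drm_display_mode foo_panel_mode = {
	/* 33.3 MHz; 800+24+72+160 = 1056 htotal, 480+27+3+15 = 525 vtotal */
	DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33300,
		 800, 824, 896, 1056, 0,
		 480, 507, 510, 525, 0,
		 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
	.width_mm = 154,
	.height_mm = 86,
};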
+ * + * Note that with digital outputs like HDMI or DP there's usually a + * massive confusion between the dot clock and the signal clock at the + * bit encoding level. Especially when a 8b/10b encoding is used and the + * difference is exactly a factor of 10. + */ + int crtc_clock; + u16 crtc_hdisplay; + u16 crtc_hblank_start; + u16 crtc_hblank_end; + u16 crtc_hsync_start; + u16 crtc_hsync_end; + u16 crtc_htotal; + u16 crtc_hskew; + u16 crtc_vdisplay; + u16 crtc_vblank_start; + u16 crtc_vblank_end; + u16 crtc_vsync_start; + u16 crtc_vsync_end; + u16 crtc_vtotal; + + /** + * @width_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + u16 width_mm; + + /** + * @height_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ + u16 height_mm; + + /** + * @type: + * + * A bitmask of flags, mostly about the source of a mode. Possible flags + * are: + * + * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native + * resolution of an LCD panel. There should only be one preferred + * mode per connector at any given time. + * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of + * them really. Drivers must set this bit for all modes they create + * and expose to userspace. + * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel + * command line. + * + * Plus a big list of flags which shouldn't be used at all, but are + * still around since these flags are also used in the userspace ABI. + * We no longer accept modes with these types though: + * + * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused. + * Use DRM_MODE_TYPE_DRIVER instead. + * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use + * DRM_MODE_TYPE_PREFERRED instead. + * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers + * which are stuck around for hysterical raisins only. No one has an + * idea what they were meant for. Don't use. + */ + u8 type; + + /** + * @expose_to_userspace: + * + * Indicates whether the mode is to be exposed to the userspace. + * This is to maintain a set of exposed modes while preparing + * user-mode's list in drm_mode_getconnector ioctl. The purpose of + * this only lies in the ioctl function, and is not to be used + * outside the function. + */ + bool expose_to_userspace; + + /** + * @head: + * + * struct list_head for mode lists. + */ + struct list_head head; + + /** + * @name: + * + * Human-readable name of the mode, filled out with drm_mode_set_name(). + */ + char name[DRM_DISPLAY_MODE_LEN]; + + /** + * @status: + * + * Status of the mode, used to filter out modes not supported by the + * hardware. See enum &drm_mode_status. + */ + enum drm_mode_status status; + + /** + * @picture_aspect_ratio: + * + * Field for setting the HDMI picture aspect ratio of a mode. 
+ */ + enum hdmi_picture_aspect picture_aspect_ratio; + +}; + +/** + * DRM_MODE_FMT - printf string for &struct drm_display_mode + */ +#define DRM_MODE_FMT "\"%s\": %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" + +/** + * DRM_MODE_ARG - printf arguments for &struct drm_display_mode + * @m: display mode + */ +#define DRM_MODE_ARG(m) \ + (m)->name, drm_mode_vrefresh(m), (m)->clock, \ + (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ + (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ + (m)->type, (m)->flags + +#define obj_to_mode(x) container_of(x, struct drm_display_mode, base) + +/** + * drm_mode_is_stereo - check for stereo mode flags + * @mode: drm_display_mode to check + * + * Returns: + * True if the mode is one of the stereo modes (like side-by-side), false if + * not. + */ +static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) +{ + return mode->flags & DRM_MODE_FLAG_3D_MASK; +} + +struct drm_connector; +struct drm_cmdline_mode; + +struct drm_display_mode *drm_mode_create(struct drm_device *dev); +void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); +void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, + const struct drm_display_mode *in); +int drm_mode_convert_umode(struct drm_device *dev, + struct drm_display_mode *out, + const struct drm_mode_modeinfo *in); +void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); +void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); +bool drm_mode_is_420_only(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420_also(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420(const struct drm_display_info *display, + const struct drm_display_mode *mode); + +struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool reduced, bool interlaced, + bool margins); +struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, + int hdisplay, int vdisplay, int vrefresh, + bool interlaced, int margins); +struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, + int hdisplay, int vdisplay, + int vrefresh, bool interlaced, + int margins, + int GTF_M, int GTF_2C, + int GTF_K, int GTF_2J); +void drm_display_mode_from_videomode(const struct videomode *vm, + struct drm_display_mode *dmode); +void drm_display_mode_to_videomode(const struct drm_display_mode *dmode, + struct videomode *vm); +void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags); +int of_get_drm_display_mode(struct device_node *np, + struct drm_display_mode *dmode, u32 *bus_flags, + int index); + +void drm_mode_set_name(struct drm_display_mode *mode); +int drm_mode_vrefresh(const struct drm_display_mode *mode); +void drm_mode_get_hv_timing(const struct drm_display_mode *mode, + int *hdisplay, int *vdisplay); + +void drm_mode_set_crtcinfo(struct drm_display_mode *p, + int adjust_flags); +void drm_mode_copy(struct drm_display_mode *dst, + const struct drm_display_mode *src); +struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, + const struct drm_display_mode *mode); +bool drm_mode_match(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2, + unsigned int match_flags); +bool drm_mode_equal(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); +bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, + const struct 
drm_display_mode *mode2); +bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); + +/* for use by the crtc helper probe functions */ +enum drm_mode_status drm_mode_validate_driver(struct drm_device *dev, + const struct drm_display_mode *mode); +enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, + int maxX, int maxY); +enum drm_mode_status +drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, + struct drm_connector *connector); +void drm_mode_prune_invalid(struct drm_device *dev, + struct list_head *mode_list, bool verbose); +void drm_mode_sort(struct list_head *mode_list); +void drm_connector_list_update(struct drm_connector *connector); + +/* parsing cmdline modes */ +bool +drm_mode_parse_command_line_for_connector(const char *mode_option, + const struct drm_connector *connector, + struct drm_cmdline_mode *mode); +struct drm_display_mode * +drm_mode_create_from_cmdline_mode(struct drm_device *dev, + struct drm_cmdline_mode *cmd); + +#endif /* __DRM_MODES_H__ */ diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h new file mode 100644 index 000000000..995fd981c --- /dev/null +++ b/include/drm/drm_modeset_helper.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
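As a usage sketch for the mode helpers declared above (the driver wrapper is hypothetical), a fixed mode can be duplicated onto a connector's probed list, named, and logged with the DRM_MODE_FMT format helpers:

static int foo_add_fixed_mode(struct drm_connector *connector,
			      const struct drm_display_mode *fixed_mode)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, fixed_mode);
	if (!mode)
		return 0;

	drm_mode_set_name(mode);
	DRM_DEBUG_KMS("adding mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
	drm_mode_probed_add(connector, mode);

	return 1;	/* one mode added */
}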
+ */ + +#ifndef __DRM_KMS_HELPER_H__ +#define __DRM_KMS_HELPER_H__ + +struct drm_crtc; +struct drm_crtc_funcs; +struct drm_device; +struct drm_framebuffer; +struct drm_mode_fb_cmd2; + +void drm_helper_move_panel_connectors_to_head(struct drm_device *); + +void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, + const struct drm_mode_fb_cmd2 *mode_cmd); + +int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + const struct drm_crtc_funcs *funcs); + +int drm_mode_config_helper_suspend(struct drm_device *dev); +int drm_mode_config_helper_resume(struct drm_device *dev); + +#endif diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h new file mode 100644 index 000000000..4efec30f8 --- /dev/null +++ b/include/drm/drm_modeset_helper_vtables.h @@ -0,0 +1,1411 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes <jesse.barnes@intel.com> + * Copyright © 2011-2013 Intel Corporation + * Copyright © 2015 Intel Corporation + * Daniel Vetter <daniel.vetter@ffwll.ch> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_MODESET_HELPER_VTABLES_H__ +#define __DRM_MODESET_HELPER_VTABLES_H__ + +#include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> + +/** + * DOC: overview + * + * The DRM mode setting helper functions are common code for drivers to use if + * they wish. Drivers are not forced to use this code in their + * implementations but it would be useful if the code they do use at least + * provides a consistent interface and operation to userspace. Therefore it is + * highly recommended to use the provided helpers as much as possible. + * + * Because there is only one pointer per modeset object to hold a vfunc table + * for helper libraries they are by necessity shared among the different + * helpers. + * + * To make this clear all the helper vtables are pulled together in this location here. + */ + +enum mode_set_atomic; +struct drm_writeback_connector; +struct drm_writeback_job; + +/** + * struct drm_crtc_helper_funcs - helper operations for CRTCs + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_crtc_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the CRTC. If the mode passed in + * is unsupported, the provider must use the next lowest power level. 
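The suspend/resume helpers declared just above pair naturally with a driver's PM callbacks; a sketch, assuming the drm_device was stored as driver data with dev_set_drvdata():

static int __maybe_unused foo_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Shuts down all outputs and stashes the atomic state in suspend_state. */
	return drm_mode_config_helper_suspend(drm);
}

static int __maybe_unused foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Restores the atomic state saved by the suspend helper. */
	return drm_mode_config_helper_resume(drm);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);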
+ * This is used by the legacy CRTC helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable a CRTC by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling a CRTC to + * facilitate transitions to atomic, but it is deprecated. Instead + * @atomic_enable and @atomic_disable should be used. + */ + void (*dpms)(struct drm_crtc *crtc, int mode); + + /** + * @prepare: + * + * This callback should prepare the CRTC for a subsequent modeset, which + * in practice means the driver should disable the CRTC if it is + * running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @atomic_disable + * should be used. + */ + void (*prepare)(struct drm_crtc *crtc); + + /** + * @commit: + * + * This callback should commit the new mode on the CRTC after a modeset, + * which in practice means the driver should enable the CRTC. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @atomic_enable + * should be used. + */ + void (*commit)(struct drm_crtc *crtc); + + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in this + * crtc. This should be implemented if the crtc has some sort of + * restriction in the modes it can display. For example, a given crtc + * may be responsible to set a clock value. If the clock can not + * produce all the values for the available modes then this callback + * can be used to restrict the number of modes to only the ones that + * can be displayed. + * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * This function is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardward constraints. Any further + * limits which depend upon the configuration can only be checked in + * @mode_fixup or @atomic_check. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc, + const struct drm_display_mode *mode); + + /** + * @mode_fixup: + * + * This callback is used to validate a mode. The parameter mode is the + * display mode that userspace requested, adjusted_mode is the mode the + * encoders need to be fed with. Note that this is the inverse semantics + * of the meaning for the &drm_encoder and &drm_bridge_funcs.mode_fixup + * vfunc. If the CRTC cannot support the requested conversion from mode + * to adjusted_mode it should reject the modeset. See also + * &drm_crtc_state.adjusted_mode for more details. + * + * This function is used by both legacy CRTC helpers and atomic helpers. 
+ * With atomic helpers it is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback, but note that they're not + * perfectly equivalent: @mode_valid is called from + * drm_atomic_helper_check_modeset(), but @atomic_check is called from + * drm_atomic_helper_check_planes(), because originally it was meant for + * plane update checks only. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any CRTC constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @mode_set: + * + * This callback is used by the legacy CRTC helpers to set a new mode, + * position and framebuffer. Since it ties the primary plane to every + * mode change it is incompatible with universal plane support. And + * since it can't update other planes it's incompatible with atomic + * modeset support. + * + * This callback is only used by CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_nofb: + * + * This callback is used to update the display mode of a CRTC without + * changing anything of the primary plane configuration. This fits the + * requirement of atomic and hence is used by the atomic helpers. It is + * also used by the transitional plane helpers to implement a + * @mode_set hook in drm_helper_crtc_mode_set(). + * + * Note that the display pipe is completely off when this function is + * called. Atomic drivers which need hardware to be running before they + * program the new display mode (e.g. because they implement runtime PM) + * should not use this hook. This is because the helper library calls + * this hook only once per mode change and not every time the display + * pipeline is suspended using either DPMS or the new "ACTIVE" property. + * Which means register values set in this callback might get reset when + * the CRTC is suspended, but not restored. Such drivers should instead + * move all their CRTC setup into the @atomic_enable callback. + * + * This callback is optional. + */ + void (*mode_set_nofb)(struct drm_crtc *crtc); + + /** + * @mode_set_base: + * + * This callback is used by the legacy CRTC helpers to set a new + * framebuffer and scanout position. It is optional and used as an + * optimized fast-path instead of a full mode set operation with all the + * resulting flickering. 
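A sketch of the @mode_fixup contract described above, for a hypothetical CRTC whose PLL can only generate pixel clocks in 250 kHz steps up to 300 MHz (both limits invented):

static bool foo_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	int clock = roundup(mode->clock, 250);

	if (clock > 300000)
		return false;	/* reject: this modeset cannot be supported */

	/* Feed the encoders the clock the PLL will actually produce. */
	adjusted_mode->clock = clock;
	return true;
}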
If it is not present + * drm_crtc_helper_set_config() will fall back to a full modeset, using + * the @mode_set callback. Since it can't update other planes it's + * incompatible with atomic modeset support. + * + * This callback is only used by the CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_base_atomic: + * + * This callback is used by the fbdev helpers to set a new framebuffer + * and scanout without sleeping, i.e. from an atomic calling context. It + * is only used to implement kgdb support. + * + * This callback is optional and only needed for kgdb support in the fbdev + * helpers. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set_base_atomic)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y, + enum mode_set_atomic); + + /** + * @disable: + * + * This callback should be used to disable the CRTC. With the atomic + * drivers it is called after all encoders connected to this CRTC have + * been shut off already using their own + * &drm_encoder_helper_funcs.disable hook. If that sequence is too + * simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the CRTC level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works + * @disable must be the inverse of @atomic_enable for atomic drivers. + * Atomic drivers should consider to use @atomic_disable instead of + * this one. + * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down a + * CRTC: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ + void (*disable)(struct drm_crtc *crtc); + + /** + * @atomic_check: + * + * Drivers should check plane-update related CRTC constraints in this + * hook. They can also check mode related limitations but need to be + * aware of the calling order, since this hook is used by + * drm_atomic_helper_check_planes() whereas the preparations needed to + * check output routing and the display mode is done in + * drm_atomic_helper_check_modeset(). Therefore drivers that want to + * check output routing and display mode constraints in this callback + * must ensure that drm_atomic_helper_check_modeset() has been called + * beforehand. This is calling order used by the default helper + * implementation in drm_atomic_helper_check(). + * + * When using drm_atomic_helper_check_planes() this hook is called + * after the &drm_plane_helper_funcs.atomic_check hook for planes, which + * allows drivers to assign shared resources requested by planes in this + * callback here. 
For more complicated dependencies the driver can call + * the provided check helpers multiple times until the computed state + * has a final configuration and everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any CRTC constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + + /** + * @atomic_begin: + * + * Drivers should prepare for an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might be vblank + * evasion, blocking updates by setting bits or doing preparatory work + * for e.g. manual update display. + * + * This hook is called before any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_begin)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + /** + * @atomic_flush: + * + * Drivers should finalize an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might include + * checking that vblank evasion was successful, unblocking updates by + * setting bits or setting the GO bit to flush out all updates. + * + * Simple hardware or hardware with special requirements can commit and + * flush out all updates for all planes from this hook and forgo all the + * other commit hooks for plane updates. + * + * This hook is called after any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. 
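A common implementation pattern for the @atomic_begin/@atomic_flush pair is to hold shadow-register updates back and then latch everything with a single "go" bit; a sketch against an imaginary register interface (foo_write() and the register names are made up):

static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	/* Block shadow-register updates while the planes are reprogrammed. */
	foo_write(crtc, FOO_REG_UPDATE_LOCK, 1);
}

static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	/* Latch all pending plane/CRTC updates at the next vblank. */
	foo_write(crtc, FOO_REG_UPDATE_LOCK, 0);
	foo_write(crtc, FOO_REG_COMMIT_GO, 1);

	/* Simplified event handling for hardware without a usable vblank irq. */
	if (crtc->state->event) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}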
+ */ + void (*atomic_flush)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + + /** + * @atomic_enable: + * + * This callback should be used to enable the CRTC. With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own &drm_encoder_helper_funcs.enable + * hook. If that sequence is too simple drivers can just add their own + * hooks and call it from this CRTC callback here by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with + * @atomic_disable. Atomic drivers don't need to implement it if there's + * no need to enable anything at the CRTC level. To ensure that runtime + * PM handling (using either DPMS or the new "ACTIVE" property) works + * @atomic_enable must be the inverse of @atomic_disable for atomic + * drivers. + * + * Drivers can use the @old_crtc_state input parameter if the operations + * needed to enable the CRTC don't depend solely on the new state but + * also on the transition between the old state and the new state. + * + * This function is optional. + */ + void (*atomic_enable)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + + /** + * @atomic_disable: + * + * This callback should be used to disable the CRTC. With the atomic + * drivers it is called after all encoders connected to this CRTC have + * been shut off already using their own + * &drm_encoder_helper_funcs.disable hook. If that sequence is too + * simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers. Atomic drivers don't + * need to implement it if there's no need to disable anything at the + * CRTC level. + * + * Comparing to @disable, this one provides the additional input + * parameter @old_crtc_state which could be used to access the old + * state. Atomic drivers should consider to use this one instead + * of @disable. + * + * This function is optional. + */ + void (*atomic_disable)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + + /** + * @get_scanout_position: + * + * Called by vblank timestamping code. + * + * Returns the current display scanout position from a CRTC and an + * optional accurate ktime_get() timestamp of when the position was + * measured. Note that this is a helper callback which is only used + * if a driver uses drm_crtc_vblank_helper_get_vblank_timestamp() + * for the @drm_crtc_funcs.get_vblank_timestamp callback. + * + * Parameters: + * + * crtc: + * The CRTC. + * in_vblank_irq: + * True when called from drm_crtc_handle_vblank(). Some drivers + * need to apply some workarounds for gpu-specific vblank irq + * quirks if the flag is set. + * vpos: + * Target location for current vertical scanout position. + * hpos: + * Target location for current horizontal scanout position. + * stime: + * Target location for timestamp taken immediately before + * scanout position query. Can be NULL to skip timestamp. + * etime: + * Target location for timestamp taken immediately after + * scanout position query. Can be NULL to skip timestamp. + * mode: + * Current display timings. + * + * Returns vpos as a positive number while in active scanout area. 
+ * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." + * + * Returns: + * + * True on success, false if a reliable scanout position counter could + * not be read out. + */ + bool (*get_scanout_position)(struct drm_crtc *crtc, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); +}; + +/** + * drm_crtc_helper_add - sets the helper vtable for a crtc + * @crtc: DRM CRTC + * @funcs: helper vtable to set for @crtc + */ +static inline void drm_crtc_helper_add(struct drm_crtc *crtc, + const struct drm_crtc_helper_funcs *funcs) +{ + crtc->helper_private = funcs; +} + +/** + * struct drm_encoder_helper_funcs - helper operations for encoders + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_encoder_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the encoder. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy encoder helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable an encoder by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling an encoder to + * facilitate transitions to atomic, but it is deprecated. Instead + * @enable and @disable should be used. + */ + void (*dpms)(struct drm_encoder *encoder, int mode); + + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in this + * encoder. This should be implemented if the encoder has some sort + * of restriction in the modes it can display. For example, a given + * encoder may be responsible to set a clock value. If the clock can + * not produce all the values for the available modes then this callback + * can be used to restrict the number of modes to only the ones that + * can be displayed. + * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * This function is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardward constraints. Any further + * limits which depend upon the configuration can only be checked in + * @mode_fixup or @atomic_check. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_encoder *crtc, + const struct drm_display_mode *mode); + + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. The parameter + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or a &drm_bridge. + * The parameter adjusted_mode is the input mode the encoder requires. It + * can be modified by this callback and does not need to match mode. See + * also &drm_crtc_state.adjusted_mode for more details. 
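Tying the CRTC hooks together, a driver bundles them in a static vtable and registers it with drm_crtc_helper_add(); a sketch reusing the hypothetical callbacks from the earlier snippets:

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.mode_fixup	= foo_crtc_mode_fixup,		/* sketched above */
	.atomic_begin	= foo_crtc_atomic_begin,
	.atomic_flush	= foo_crtc_atomic_flush,
	/* .atomic_enable / .atomic_disable would normally be filled in too */
};

/* Called once per CRTC after drm_crtc_init_with_planes(). */
static void foo_crtc_setup_helpers(struct drm_crtc *crtc)
{
	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
}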
+ * + * This function is used by both legacy CRTC helpers and atomic helpers. + * This hook is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback. If @atomic_check is used, + * this hook isn't called since @atomic_check allows a strict superset + * of the functionality of @mode_fixup. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any encoder constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @prepare: + * + * This callback should prepare the encoder for a subsequent modeset, + * which in practice means the driver should disable the encoder if it + * is running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling an encoder to facilitate + * transitions to atomic, but it is deprecated. Instead @disable should + * be used. + */ + void (*prepare)(struct drm_encoder *encoder); + + /** + * @commit: + * + * This callback should commit the new mode on the encoder after a modeset, + * which in practice means the driver should enable the encoder. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling an encoder to facilitate + * transitions to atomic, but it is deprecated. Instead @enable should + * be used. + */ + void (*commit)(struct drm_encoder *encoder); + + /** + * @mode_set: + * + * This callback is used to update the display mode of an encoder. + * + * Note that the display pipe is completely off when this function is + * called. Drivers which need hardware to be running before they program + * the new display mode (because they implement runtime PM) should not + * use this hook, because the helper library calls it only once and not + * every time the display pipeline is suspend using either DPMS or the + * new "ACTIVE" property. Such drivers should instead move all their + * encoder setup into the @enable callback. + * + * This callback is used both by the legacy CRTC helpers and the atomic + * modeset helpers. It is optional in the atomic helpers. + * + * NOTE: + * + * If the driver uses the atomic modeset helpers and needs to inspect + * the connector state or connector display info during mode setting, + * @atomic_mode_set can be used instead. 
+ */ + void (*mode_set)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @atomic_mode_set: + * + * This callback is used to update the display mode of an encoder. + * + * Note that the display pipe is completely off when this function is + * called. Drivers which need hardware to be running before they program + * the new display mode (because they implement runtime PM) should not + * use this hook, because the helper library calls it only once and not + * every time the display pipeline is suspended using either DPMS or the + * new "ACTIVE" property. Such drivers should instead move all their + * encoder setup into the @enable callback. + * + * This callback is used by the atomic modeset helpers in place of the + * @mode_set callback, if set by the driver. It is optional and should + * be used instead of @mode_set if the driver needs to inspect the + * connector state or display info, since there is no direct way to + * go from the encoder to the current connector. + */ + void (*atomic_mode_set)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + + /** + * @detect: + * + * This callback can be used by drivers who want to do detection on the + * encoder object instead of in connector functions. + * + * It is not used by any helper and therefore has purely driver-specific + * semantics. New drivers shouldn't use this and instead just implement + * their own private callbacks. + * + * FIXME: + * + * This should just be converted into a pile of driver vfuncs. + * Currently radeon, amdgpu and nouveau are using it. + */ + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + + /** + * @atomic_disable: + * + * This callback should be used to disable the encoder. With the atomic + * drivers it is called before this encoder's CRTC has been shut off + * using their own &drm_crtc_helper_funcs.atomic_disable hook. If that + * sequence is too simple drivers can just add their own driver private + * encoder hooks and call them from CRTC's callback by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This callback is a variant of @disable that provides the atomic state + * to the driver. If @atomic_disable is implemented, @disable is not + * called by the helpers. + * + * This hook is only used by atomic helpers. Atomic drivers don't need + * to implement it if there's no need to disable anything at the encoder + * level. To ensure that runtime PM handling (using either DPMS or the + * new "ACTIVE" property) works @atomic_disable must be the inverse of + * @atomic_enable. + */ + void (*atomic_disable)(struct drm_encoder *encoder, + struct drm_atomic_state *state); + + /** + * @atomic_enable: + * + * This callback should be used to enable the encoder. It is called + * after this encoder's CRTC has been enabled using their own + * &drm_crtc_helper_funcs.atomic_enable hook. If that sequence is + * too simple drivers can just add their own driver private encoder + * hooks and call them from CRTC's callback by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). + * + * This callback is a variant of @enable that provides the atomic state + * to the driver. If @atomic_enable is implemented, @enable is not + * called by the helpers. + * + * This hook is only used by atomic helpers, it is the opposite of + * @atomic_disable. 
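On the encoder side, a sketch of an @atomic_mode_set hook that uses the connector state to reach the display info, which @mode_set cannot do (the register-programming helpers are imaginary):

static void foo_encoder_atomic_mode_set(struct drm_encoder *encoder,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
	struct drm_connector *connector = conn_state->connector;

	/* Unlike @mode_set, the connector and its display_info are in reach. */
	foo_encoder_set_bpc(encoder, connector->display_info.bpc);	/* made up */
	foo_encoder_set_timings(encoder, mode);				/* made up */
}

/* Registered with drm_encoder_helper_add() during encoder init. */
static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
	.atomic_mode_set = foo_encoder_atomic_mode_set,
};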
Atomic drivers don't need to implement it if there's + * no need to enable anything at the encoder level. To ensure that + * runtime PM handling works @atomic_enable must be the inverse of + * @atomic_disable. + */ + void (*atomic_enable)(struct drm_encoder *encoder, + struct drm_atomic_state *state); + + /** + * @disable: + * + * This callback should be used to disable the encoder. With the atomic + * drivers it is called before this encoder's CRTC has been shut off + * using their own &drm_crtc_helper_funcs.disable hook. If that + * sequence is too simple drivers can just add their own driver private + * encoder hooks and call them from CRTC's callback by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the encoder level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works + * @disable must be the inverse of @enable for atomic drivers. + * + * For atomic drivers also consider @atomic_disable and save yourself + * from having to read the NOTE below! + * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down a + * encoder: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ + void (*disable)(struct drm_encoder *encoder); + + /** + * @enable: + * + * This callback should be used to enable the encoder. With the atomic + * drivers it is called after this encoder's CRTC has been enabled using + * their own &drm_crtc_helper_funcs.enable hook. If that sequence is + * too simple drivers can just add their own driver private encoder + * hooks and call them from CRTC's callback by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). + * + * This hook is only used by atomic helpers, it is the opposite of + * @disable. Atomic drivers don't need to implement it if there's no + * need to enable anything at the encoder level. To ensure that + * runtime PM handling (using either DPMS or the new "ACTIVE" property) + * works @enable must be the inverse of @disable for atomic drivers. + */ + void (*enable)(struct drm_encoder *encoder); + + /** + * @atomic_check: + * + * This callback is used to validate encoder state for atomic drivers. + * Since the encoder is the object connecting the CRTC and connector it + * gets passed both states, to be able to validate interactions and + * update the CRTC to match what the encoder needs for the requested + * connector. + * + * Since this provides a strict superset of the functionality of + * @mode_fixup (the requested and adjusted modes are both available + * through the passed in &struct drm_crtc_state) @mode_fixup is not + * called when @atomic_check is implemented. + * + * This function is used by the atomic helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. 
The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * Also beware that userspace can request its own custom modes, neither + * core nor helpers filter modes to the list of probe modes reported by + * the GETCONNECTOR IOCTL and stored in &drm_connector.modes. To ensure + * that modes are filtered consistently put any encoder constraints and + * limits checks into @mode_valid. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); +}; + +/** + * drm_encoder_helper_add - sets the helper vtable for an encoder + * @encoder: DRM encoder + * @funcs: helper vtable to set for @encoder + */ +static inline void drm_encoder_helper_add(struct drm_encoder *encoder, + const struct drm_encoder_helper_funcs *funcs) +{ + encoder->helper_private = funcs; +} + +/** + * struct drm_connector_helper_funcs - helper operations for connectors + * + * These functions are used by the atomic and legacy modeset helpers and by the + * probe helpers. + */ +struct drm_connector_helper_funcs { + /** + * @get_modes: + * + * This function should fill in all modes currently valid for the sink + * into the &drm_connector.probed_modes list. It should also update the + * EDID property by calling drm_connector_update_edid_property(). + * + * The usual way to implement this is to cache the EDID retrieved in the + * probe callback somewhere in the driver-private connector structure. + * In this function drivers then parse the modes in the EDID and add + * them by calling drm_add_edid_modes(). But connectors that driver a + * fixed panel can also manually add specific modes using + * drm_mode_probed_add(). Drivers which manually add modes should also + * make sure that the &drm_connector.display_info, + * &drm_connector.width_mm and &drm_connector.height_mm fields are + * filled in. + * + * Virtual drivers that just want some standard VESA mode with a given + * resolution can call drm_add_modes_noedid(), and mark the preferred + * one using drm_set_preferred_mode(). + * + * This function is only called after the @detect hook has indicated + * that a sink is connected and when the EDID isn't overridden through + * sysfs or the kernel commandline. + * + * This callback is used by the probe helpers in e.g. + * drm_helper_probe_single_connector_modes(). + * + * To avoid races with concurrent connector state updates, the helper + * libraries always call this with the &drm_mode_config.connection_mutex + * held. Because of this it's safe to inspect &drm_connector->state. + * + * RETURNS: + * + * The number of modes added by calling drm_mode_probed_add(). + */ + int (*get_modes)(struct drm_connector *connector); + + /** + * @detect_ctx: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. + * + * This callback is optional, if not implemented the connector will be + * considered as always being attached. 
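A sketch of the @get_modes contract described above for an EDID-backed connector; the to_foo_connector() wrapper and its ddc i2c adapter are assumptions:

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct foo_connector *foo = to_foo_connector(connector);
	struct edid *edid;
	int count;

	edid = drm_get_edid(connector, foo->ddc);
	drm_connector_update_edid_property(connector, edid);
	if (!edid)
		return 0;

	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;	/* number of modes added to the probed list */
}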
+ * + * This is the atomic version of &drm_connector_funcs.detect. + * + * To avoid races against concurrent connector state updates, the + * helper libraries always call this with ctx set to a valid context, + * and &drm_mode_config.connection_mutex will always be locked with + * the ctx parameter set to this ctx. This allows taking additional + * locks as required. + * + * RETURNS: + * + * &drm_connector_status indicating the connector's status, + * or the error code returned by drm_modeset_lock(), -EDEADLK. + */ + int (*detect_ctx)(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force); + + /** + * @mode_valid: + * + * Callback to validate a mode for a connector, irrespective of the + * specific display configuration. + * + * This callback is used by the probe helpers to filter the mode list + * (which is usually derived from the EDID data block from the sink). + * See e.g. drm_helper_probe_single_connector_modes(). + * + * This function is optional. + * + * NOTE: + * + * This only filters the mode list supplied to userspace in the + * GETCONNECTOR IOCTL. Compared to &drm_encoder_helper_funcs.mode_valid, + * &drm_crtc_helper_funcs.mode_valid and &drm_bridge_funcs.mode_valid, + * which are also called by the atomic helpers from + * drm_atomic_helper_check_modeset(). This allows userspace to force and + * ignore sink constraint (like the pixel clock limits in the screen's + * EDID), which is useful for e.g. testing, or working around a broken + * EDID. Any source hardware constraint (which always need to be + * enforced) therefore should be checked in one of the above callbacks, + * and not this one here. + * + * To avoid races with concurrent connector state updates, the helper + * libraries always call this with the &drm_mode_config.connection_mutex + * held. Because of this it's safe to inspect &drm_connector->state. + * + * RETURNS: + * + * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum + * drm_mode_status. + */ + enum drm_mode_status (*mode_valid)(struct drm_connector *connector, + struct drm_display_mode *mode); + + /** + * @mode_valid_ctx: + * + * Callback to validate a mode for a connector, irrespective of the + * specific display configuration. + * + * This callback is used by the probe helpers to filter the mode list + * (which is usually derived from the EDID data block from the sink). + * See e.g. drm_helper_probe_single_connector_modes(). + * + * This function is optional, and is the atomic version of + * &drm_connector_helper_funcs.mode_valid. + * + * To allow for accessing the atomic state of modesetting objects, the + * helper libraries always call this with ctx set to a valid context, + * and &drm_mode_config.connection_mutex will always be locked with + * the ctx parameter set to @ctx. This allows for taking additional + * locks as required. + * + * Even though additional locks may be acquired, this callback is + * still expected not to take any constraints into account which would + * be influenced by the currently set display state - such constraints + * should be handled in the driver's atomic check. For example, if a + * connector shares display bandwidth with other connectors then it + * would be ok to validate the minimum bandwidth requirement of a mode + * against the maximum possible bandwidth of the connector. But it + * wouldn't be ok to take the current bandwidth usage of other + * connectors into account, as this would change depending on the + * display state. 
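As a sketch of the sink-side filtering that @mode_valid and @mode_valid_ctx are meant for, assume a hypothetical sink limited to a 165 MHz pixel clock; source-side limits would instead go into the encoder/CRTC/bridge mode_valid hooks mentioned above:

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

#define MY_SINK_MAX_CLOCK_KHZ	165000	/* hypothetical sink limit */

static enum drm_mode_status
my_connector_mode_valid(struct drm_connector *connector,
			struct drm_display_mode *mode)
{
	/* Reject modes the sink cannot handle; mode->clock is in kHz. */
	if (mode->clock > MY_SINK_MAX_CLOCK_KHZ)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}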
+ * + * Returns: + * 0 if &drm_connector_helper_funcs.mode_valid_ctx succeeded and wrote + * the &enum drm_mode_status value to @status, or a negative error + * code otherwise. + * + */ + int (*mode_valid_ctx)(struct drm_connector *connector, + struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx, + enum drm_mode_status *status); + + /** + * @best_encoder: + * + * This function should select the best encoder for the given connector. + * + * This function is used by both the atomic helpers (in the + * drm_atomic_helper_check_modeset() function) and in the legacy CRTC + * helpers. + * + * NOTE: + * + * In atomic drivers this function is called in the check phase of an + * atomic update. The driver is not allowed to change or inspect + * anything outside of arguments passed-in. Atomic drivers which need to + * inspect dynamic configuration state should instead use + * @atomic_best_encoder. + * + * You can leave this function to NULL if the connector is only + * attached to a single encoder. In this case, the core will call + * drm_connector_get_single_encoder() for you. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. + */ + struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + + /** + * @atomic_best_encoder: + * + * This is the atomic version of @best_encoder for atomic drivers which + * need to select the best encoder depending upon the desired + * configuration and can't select it statically. + * + * This function is used by drm_atomic_helper_check_modeset(). + * If it is not implemented, the core will fallback to @best_encoder + * (or drm_connector_get_single_encoder() if @best_encoder is NULL). + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. + */ + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); + + /** + * @atomic_check: + * + * This hook is used to validate connector state. This function is + * called from &drm_atomic_helper_check_modeset, and is called when + * a connector property is set, or a modeset on the crtc is forced. + * + * Because &drm_atomic_helper_check_modeset may be called multiple times, + * this function should handle being called multiple times as well. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. 
The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_connector *connector, + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This hook is to be used by drivers implementing writeback connectors + * that need a point when to commit the writeback job to the hardware. + * The writeback_job to commit is available in + * &drm_connector_state.writeback_job. + * + * This hook is optional. + * + * This callback is used by the atomic modeset helpers. + */ + void (*atomic_commit)(struct drm_connector *connector, + struct drm_connector_state *state); + + /** + * @prepare_writeback_job: + * + * As writeback jobs contain a framebuffer, drivers may need to + * prepare and clean them up the same way they can prepare and + * clean up framebuffers for planes. This optional connector operation + * is used to support the preparation of writeback jobs. The job + * prepare operation is called from drm_atomic_helper_prepare_planes() + * for struct &drm_writeback_connector connectors only. + * + * This operation is optional. + * + * This callback is used by the atomic modeset helpers. + */ + int (*prepare_writeback_job)(struct drm_writeback_connector *connector, + struct drm_writeback_job *job); + /** + * @cleanup_writeback_job: + * + * This optional connector operation is used to support the + * cleanup of writeback jobs. The job cleanup operation is called + * from the existing drm_writeback_cleanup_job() function, invoked + * both when destroying the job as part of an aborted commit, or when + * the job completes. + * + * This operation is optional. + * + * This callback is used by the atomic modeset helpers. + */ + void (*cleanup_writeback_job)(struct drm_writeback_connector *connector, + struct drm_writeback_job *job); +}; + +/** + * drm_connector_helper_add - sets the helper vtable for a connector + * @connector: DRM connector + * @funcs: helper vtable to set for @connector + */ +static inline void drm_connector_helper_add(struct drm_connector *connector, + const struct drm_connector_helper_funcs *funcs) +{ + connector->helper_private = funcs; +} + +/** + * struct drm_plane_helper_funcs - helper operations for planes + * + * These functions are used by the atomic helpers and by the transitional plane + * helpers. + */ +struct drm_plane_helper_funcs { + /** + * @prepare_fb: + * + * This hook is to prepare a framebuffer for scanout by e.g. pinning + * its backing storage or relocating it into a contiguous block of + * VRAM. Other possible preparatory work includes flushing caches. + * + * This function must not block for outstanding rendering, since it is + * called in the context of the atomic IOCTL even for async commits to + * be able to return any errors to userspace. Instead the recommended + * way is to fill out the &drm_plane_state.fence of the passed-in + * &drm_plane_state. If the driver doesn't support native fences then + * equivalent functionality should be implemented through private + * members in the plane structure. + * + * Drivers which always have their buffers pinned should use + * drm_gem_fb_prepare_fb() for this hook. 
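Following the note above, a driver whose buffers are always pinned can plug the generic GEM helper straight into @prepare_fb; a sketch, where the my_plane_* hooks are hypothetical:

#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
	.prepare_fb	= drm_gem_fb_prepare_fb,	/* picks up the implicit fence */
	.atomic_check	= my_plane_atomic_check,	/* hypothetical */
	.atomic_update	= my_plane_atomic_update,	/* hypothetical */
};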
+ * + * The helpers will call @cleanup_fb with matching arguments for every + * successful call to this hook. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * RETURNS: + * + * 0 on success or one of the following negative error codes allowed by + * the &drm_mode_config_funcs.atomic_commit vfunc. When using helpers + * this callback is the only one which can fail an atomic commit, + * everything else must complete successfully. + */ + int (*prepare_fb)(struct drm_plane *plane, + struct drm_plane_state *new_state); + /** + * @cleanup_fb: + * + * This hook is called to clean up any resources allocated for the given + * framebuffer and plane configuration in @prepare_fb. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*cleanup_fb)(struct drm_plane *plane, + struct drm_plane_state *old_state); + + /** + * @atomic_check: + * + * Drivers should check plane specific constraints in this hook. + * + * When using drm_atomic_helper_check_planes() plane's @atomic_check + * hooks are called before the ones for CRTCs, which allows drivers to + * request shared resources that the CRTC controls here. For more + * complicated dependencies the driver can call the provided check helpers + * multiple times until the computed state has a final configuration and + * everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_update: + * + * Drivers should use this function to update the plane state. This + * hook is called in-between the &drm_crtc_helper_funcs.atomic_begin and + * drm_crtc_helper_funcs.atomic_flush callbacks. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_update)(struct drm_plane *plane, + struct drm_plane_state *old_state); + /** + * @atomic_disable: + * + * Drivers should use this function to unconditionally disable a plane. 
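A minimal sketch of the @atomic_update/@atomic_disable pair described here; the my_hw_* register helpers are hypothetical:

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;	/* the new state */

	if (!state->visible)
		return;

	/* hypothetical: program scanout address and position from the new state */
	my_hw_program_plane(plane, state->fb, state->crtc_x, state->crtc_y);
}

static void my_plane_atomic_disable(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	my_hw_disable_plane(plane);	/* hypothetical */
}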
+ * This hook is called in-between the + * &drm_crtc_helper_funcs.atomic_begin and + * drm_crtc_helper_funcs.atomic_flush callbacks. It is an alternative to + * @atomic_update, which will be called for disabling planes, too, if + * the @atomic_disable hook isn't implemented. + * + * This hook is also useful to disable planes in preparation of a modeset, + * by calling drm_atomic_helper_disable_planes_on_crtc() from the + * &drm_crtc_helper_funcs.disable hook. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_disable)(struct drm_plane *plane, + struct drm_plane_state *old_state); + + /** + * @atomic_async_check: + * + * Drivers should set this function pointer to check if the plane state + * can be updated in a async fashion. Here async means "not vblank + * synchronized". + * + * This hook is called by drm_atomic_async_check() to establish if a + * given update can be committed asynchronously, that is, if it can + * jump ahead of the state currently queued for update. + * + * RETURNS: + * + * Return 0 on success and any error returned indicates that the update + * can not be applied in asynchronous manner. + */ + int (*atomic_async_check)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_async_update: + * + * Drivers should set this function pointer to perform asynchronous + * updates of planes, that is, jump ahead of the currently queued + * state and update the plane. Here async means "not vblank + * synchronized". + * + * This hook is called by drm_atomic_helper_async_commit(). + * + * An async update will happen on legacy cursor updates. An async + * update won't happen if there is an outstanding commit modifying + * the same plane. + * + * Note that unlike &drm_plane_helper_funcs.atomic_update this hook + * takes the new &drm_plane_state as parameter. When doing async_update + * drivers shouldn't replace the &drm_plane_state but update the + * current one with the new plane configurations in the new + * plane_state. + * + * Drivers should also swap the framebuffers between current plane + * state (&drm_plane.state) and new_state. + * This is required since cleanup for async commits is performed on + * the new state, rather than old state like for traditional commits. + * Since we want to give up the reference on the current (old) fb + * instead of our brand new one, swap them in the driver during the + * async commit. + * + * FIXME: + * - It only works for single plane updates + * - Async Pageflips are not supported yet + * - Some hw might still scan out the old buffer until the next + * vblank, however we let go of the fb references as soon as + * we run this hook. For now drivers must implement their own workers + * for deferring if needed, until a common solution is created. 
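A sketch of the async-update pattern spelled out above, for a cursor-style plane; my_hw_update_cursor() is hypothetical. Note the framebuffer swap, so that cleanup (which runs on the new state) releases the old buffer:

#include <linux/kernel.h>	/* swap() */

static void my_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *state = plane->state;	/* current state, updated in place */

	/* Swap fbs so the old buffer is the one released during cleanup. */
	swap(state->fb, new_state->fb);
	state->crtc_x = new_state->crtc_x;
	state->crtc_y = new_state->crtc_y;
	state->src_x = new_state->src_x;
	state->src_y = new_state->src_y;

	my_hw_update_cursor(plane, state);	/* hypothetical */
}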
+ */ + void (*atomic_async_update)(struct drm_plane *plane, + struct drm_plane_state *new_state); +}; + +/** + * drm_plane_helper_add - sets the helper vtable for a plane + * @plane: DRM plane + * @funcs: helper vtable to set for @plane + */ +static inline void drm_plane_helper_add(struct drm_plane *plane, + const struct drm_plane_helper_funcs *funcs) +{ + plane->helper_private = funcs; +} + +/** + * struct drm_mode_config_helper_funcs - global modeset helper operations + * + * These helper functions are used by the atomic helpers. + */ +struct drm_mode_config_helper_funcs { + /** + * @atomic_commit_tail: + * + * This hook is used by the default atomic_commit() hook implemented in + * drm_atomic_helper_commit() together with the nonblocking commit + * helpers (see drm_atomic_helper_setup_commit() for a starting point) + * to implement blocking and nonblocking commits easily. It is not used + * by the atomic helpers otherwise. + * + * This function is called when the new atomic state has already been + * swapped into the various state pointers. The passed in state + * therefore contains copies of the old/previous state. This hook should + * commit the new state into hardware. Note that the helpers have + * already waited for preceding atomic commits and fences, but drivers + * can add more waiting calls at the start of their implementation, e.g. + * to wait for driver-internal requests for implicit syncing, before + * starting to commit the update to the hardware. + * + * After the atomic update is committed to the hardware this hook needs + * to call drm_atomic_helper_commit_hw_done(). Then wait for the update + * to be executed by the hardware, for example using + * drm_atomic_helper_wait_for_vblanks() or + * drm_atomic_helper_wait_for_flip_done(), and then clean up the old + * framebuffers using drm_atomic_helper_cleanup_planes(). + * + * When disabling a CRTC this hook _must_ stall for the commit to + * complete. Vblank waits don't work on a disabled CRTC, hence the core + * can't take care of this. And it also can't rely on the vblank event, + * since that can be signalled already when the screen shows black, + * which can happen much earlier than the last hardware access needed to + * shut off the display pipeline completely. + * + * This hook is optional; the default implementation is + * drm_atomic_helper_commit_tail(). + */ + void (*atomic_commit_tail)(struct drm_atomic_state *state); +}; + +#endif diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h new file mode 100644 index 000000000..aafd07388 --- /dev/null +++ b/include/drm/drm_modeset_lock.h @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2014 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef DRM_MODESET_LOCK_H_ +#define DRM_MODESET_LOCK_H_ + +#include <linux/ww_mutex.h> + +struct drm_modeset_lock; + +/** + * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) + * @ww_ctx: base acquire ctx + * @contended: used internally for -EDEADLK handling + * @locked: list of held locks + * @trylock_only: trylock mode used in atomic contexts/panic notifiers + * @interruptible: whether interruptible locking should be used. + * + * Each thread competing for a set of locks must use one acquire + * ctx. And if any lock fxn returns -EDEADLK, it must backoff and + * retry. + */ +struct drm_modeset_acquire_ctx { + + struct ww_acquire_ctx ww_ctx; + + /* + * Contended lock: if a lock is contended you should only call + * drm_modeset_backoff() which drops locks and slow-locks the + * contended lock. + */ + struct drm_modeset_lock *contended; + + /* + * list of held locks (drm_modeset_lock) + */ + struct list_head locked; + + /* + * Trylock mode, use only for panic handlers! + */ + bool trylock_only; + + /* Perform interruptible waits on this context. */ + bool interruptible; +}; + +/** + * struct drm_modeset_lock - used for locking modeset resources. + * @mutex: resource locking + * @head: used to hold its place on &drm_atomi_state.locked list when + * part of an atomic update + * + * Used for locking CRTCs and other modeset resources. + */ +struct drm_modeset_lock { + /* + * modeset lock + */ + struct ww_mutex mutex; + + /* + * Resources that are locked as part of an atomic update are added + * to a list (so we know what to unlock at the end). 
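The acquire context is meant to be used with the usual deadlock/backoff dance; a sketch that takes a single CRTC lock (further locks would be taken with the same ctx), with my_touch_crtc() a hypothetical caller:

#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

static void my_touch_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		/* Another thread won; drop everything, slow-lock, try again. */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	if (!ret) {
		/* protected CRTC state may be inspected or updated here */
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}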
+ */ + struct list_head head; +}; + +#define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0) + +void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx, + uint32_t flags); +void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx); +void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx); +int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx); + +void drm_modeset_lock_init(struct drm_modeset_lock *lock); + +/** + * drm_modeset_lock_fini - cleanup lock + * @lock: lock to cleanup + */ +static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock) +{ + WARN_ON(!list_empty(&lock->head)); +} + +/** + * drm_modeset_is_locked - equivalent to mutex_is_locked() + * @lock: lock to check + */ +static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock) +{ + return ww_mutex_is_locked(&lock->mutex); +} + +/** + * drm_modeset_lock_assert_held - equivalent to lockdep_assert_held() + * @lock: lock to check + */ +static inline void drm_modeset_lock_assert_held(struct drm_modeset_lock *lock) +{ + lockdep_assert_held(&lock->mutex.base); +} + +int drm_modeset_lock(struct drm_modeset_lock *lock, + struct drm_modeset_acquire_ctx *ctx); +int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock); +void drm_modeset_unlock(struct drm_modeset_lock *lock); + +struct drm_device; +struct drm_crtc; +struct drm_plane; + +void drm_modeset_lock_all(struct drm_device *dev); +void drm_modeset_unlock_all(struct drm_device *dev); +void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); + +int drm_modeset_lock_all_ctx(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); + +/** + * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks + * @dev: drm device + * @ctx: local modeset acquire context, will be dereferenced + * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init() + * @ret: local ret/err/etc variable to track error status + * + * Use these macros to simplify grabbing all modeset locks using a local + * context. This has the advantage of reducing boilerplate, but also properly + * checking return values where appropriate. + * + * Any code run between BEGIN and END will be holding the modeset locks. + * + * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and + * forth between the labels on deadlock and error conditions. + * + * Drivers can acquire additional modeset locks. If any lock acquisition + * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with + * the @ret parameter containing the return value of drm_modeset_lock(). + * + * Returns: + * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN() + * is 0, so no error checking is necessary + */ +#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_lock(&dev->mode_config.mutex); \ + drm_modeset_acquire_init(&ctx, flags); \ +modeset_lock_retry: \ + ret = drm_modeset_lock_all_ctx(dev, &ctx); \ + if (ret) \ + goto modeset_lock_fail; + +/** + * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks + * @dev: drm device + * @ctx: local modeset acquire context, will be dereferenced + * @ret: local ret/err/etc variable to track error status + * + * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN + * if ret is -EDEADLK. + * + * It's important that you use the same ret variable for begin and end so + * deadlock conditions are properly handled. 
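In practice the BEGIN/END pair wraps the critical section like this, e.g. in a debugfs or ioctl path; my_dump_all() is a hypothetical example:

static int my_dump_all(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	/* all modeset locks are held here; -EDEADLK backoff is handled by END */

	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);

	return ret;
}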
+ * + * Returns: + * ret will be untouched unless it is -EDEADLK on entry. That means that if you + * successfully acquire the locks, ret will be whatever your code sets it to. If + * there is a deadlock or other failure with acquire or backoff, ret will be set + * to that failure. In both of these cases the code between BEGIN/END will not + * be run, so the failure will reflect the inability to grab the locks. + */ +#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \ +modeset_lock_fail: \ + if (ret == -EDEADLK) { \ + ret = drm_modeset_backoff(&ctx); \ + if (!ret) \ + goto modeset_lock_retry; \ + } \ + drm_modeset_drop_locks(&ctx); \ + drm_modeset_acquire_fini(&ctx); \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_unlock(&dev->mode_config.mutex); + +#endif /* DRM_MODESET_LOCK_H_ */ diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h new file mode 100644 index 000000000..b9b093add --- /dev/null +++ b/include/drm/drm_of.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_OF_H__ +#define __DRM_OF_H__ + +#include <linux/of_graph.h> +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) +#include <drm/drm_bridge.h> +#endif + +struct component_master_ops; +struct component_match; +struct device; +struct drm_device; +struct drm_encoder; +struct drm_panel; +struct drm_bridge; +struct device_node; + +/** + * enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection + * @DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: Even pixels are expected to be generated + * from the first port, odd pixels from the second port + * @DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: Odd pixels are expected to be generated + * from the first port, even pixels from the second port + */ +enum drm_lvds_dual_link_pixels { + DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 0, + DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 1, +}; + +#ifdef CONFIG_OF +uint32_t drm_of_crtc_port_mask(struct drm_device *dev, + struct device_node *port); +uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, + struct device_node *port); +void drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node); +int drm_of_component_probe(struct device *dev, + int (*compare_of)(struct device *, void *), + const struct component_master_ops *m_ops); +int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint); +int drm_of_find_panel_or_bridge(const struct device_node *np, + int port, int endpoint, + struct drm_panel **panel, + struct drm_bridge **bridge); +int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, + const struct device_node *port2); +#else +static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev, + struct device_node *port) +{ + return 0; +} + +static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, + struct device_node *port) +{ + return 0; +} + +static inline void +drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node) +{ +} + +static inline int +drm_of_component_probe(struct device *dev, + int (*compare_of)(struct device *, void *), + const struct component_master_ops *m_ops) +{ + return -EINVAL; +} + +static inline int drm_of_encoder_active_endpoint(struct device_node *node, + struct drm_encoder *encoder, + struct of_endpoint *endpoint) +{ + return -EINVAL; +} +static inline int 
drm_of_find_panel_or_bridge(const struct device_node *np, + int port, int endpoint, + struct drm_panel **panel, + struct drm_bridge **bridge) +{ + return -EINVAL; +} + +static inline int +drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, + const struct device_node *port2) +{ + return -EINVAL; +} +#endif + +/* + * drm_of_panel_bridge_remove - remove panel bridge + * @np: device tree node containing panel bridge output ports + * + * Remove the panel bridge of a given DT node's port and endpoint number + * + * Returns zero if successful, or one of the standard error codes if it fails. + */ +static inline int drm_of_panel_bridge_remove(const struct device_node *np, + int port, int endpoint) +{ +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) + struct drm_bridge *bridge; + struct device_node *remote; + + remote = of_graph_get_remote_node(np, port, endpoint); + if (!remote) + return -ENODEV; + + bridge = of_drm_find_bridge(remote); + drm_panel_bridge_remove(bridge); + + return 0; +#else + return -EINVAL; +#endif +} + +static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct of_endpoint endpoint; + int ret = drm_of_encoder_active_endpoint(node, encoder, + &endpoint); + + return ret ?: endpoint.id; +} + +static inline int drm_of_encoder_active_port_id(struct device_node *node, + struct drm_encoder *encoder) +{ + struct of_endpoint endpoint; + int ret = drm_of_encoder_active_endpoint(node, encoder, + &endpoint); + + return ret ?: endpoint.port; +} + +#endif /* __DRM_OF_H__ */ diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h new file mode 100644 index 000000000..33605c3f0 --- /dev/null +++ b/include/drm/drm_panel.h @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2013, NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_PANEL_H__ +#define __DRM_PANEL_H__ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/list.h> + +struct backlight_device; +struct device_node; +struct drm_connector; +struct drm_device; +struct drm_panel; +struct display_timing; + +enum drm_panel_orientation; + +/** + * struct drm_panel_funcs - perform operations on a given panel + * + * The .prepare() function is typically called before the display controller + * starts to transmit video data. Panel drivers can use this to turn the panel + * on and wait for it to become ready. 
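A sketch of how the prepare/enable split typically looks for a simple panel driver; struct my_panel, its reset GPIO and the 120 ms delay are made-up examples, not part of the patch:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <drm/drm_panel.h>

struct my_panel {
	struct drm_panel base;
	struct gpio_desc *reset_gpio;	/* hypothetical */
};

static int my_panel_prepare(struct drm_panel *panel)
{
	struct my_panel *p = container_of(panel, struct my_panel, base);

	gpiod_set_value_cansleep(p->reset_gpio, 0);	/* release reset */
	msleep(120);					/* hypothetical power-up delay */
	return 0;
}

static int my_panel_enable(struct drm_panel *panel)
{
	/* Nothing to do here when the backlight is handled via drm_panel_of_backlight(). */
	return 0;
}

static const struct drm_panel_funcs my_panel_funcs = {
	.prepare	= my_panel_prepare,
	.enable		= my_panel_enable,
	.get_modes	= my_panel_get_modes,	/* hypothetical; the only mandatory hook */
};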
If additional configuration is required + * (via a control bus such as I2C, SPI or DSI for example) this is a good time + * to do that. + * + * After the display controller has started transmitting video data, it's safe + * to call the .enable() function. This will typically enable the backlight to + * make the image on screen visible. Some panels require a certain amount of + * time or frames before the image is displayed. This function is responsible + * for taking this into account before enabling the backlight to avoid visual + * glitches. + * + * Before stopping video transmission from the display controller it can be + * necessary to turn off the panel to avoid visual glitches. This is done in + * the .disable() function. Analogously to .enable() this typically involves + * turning off the backlight and waiting for some time to make sure no image + * is visible on the panel. It is then safe for the display controller to + * cease transmission of video data. + * + * To save power when no video data is transmitted, a driver can power down + * the panel. This is the job of the .unprepare() function. + * + * Backlight can be handled automatically if configured using + * drm_panel_of_backlight(). Then the driver does not need to implement the + * functionality to enable/disable backlight. + */ +struct drm_panel_funcs { + /** + * @prepare: + * + * Turn on panel and perform set up. + * + * This function is optional. + */ + int (*prepare)(struct drm_panel *panel); + + /** + * @enable: + * + * Enable panel (turn on back light, etc.). + * + * This function is optional. + */ + int (*enable)(struct drm_panel *panel); + + /** + * @disable: + * + * Disable panel (turn off back light, etc.). + * + * This function is optional. + */ + int (*disable)(struct drm_panel *panel); + + /** + * @unprepare: + * + * Turn off panel. + * + * This function is optional. + */ + int (*unprepare)(struct drm_panel *panel); + + /** + * @get_modes: + * + * Add modes to the connector that the panel is attached to + * and returns the number of modes added. + * + * This function is mandatory. + */ + int (*get_modes)(struct drm_panel *panel, + struct drm_connector *connector); + + /** + * @get_timings: + * + * Copy display timings into the provided array and return + * the number of display timings available. + * + * This function is optional. + */ + int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, + struct display_timing *timings); +}; + +/** + * struct drm_panel - DRM panel object + */ +struct drm_panel { + /** + * @dev: + * + * Parent device of the panel. + */ + struct device *dev; + + /** + * @backlight: + * + * Backlight device, used to turn on backlight after the call + * to enable(), and to turn off backlight before the call to + * disable(). + * backlight is set by drm_panel_of_backlight() and drivers + * shall not assign it. + */ + struct backlight_device *backlight; + + /** + * @funcs: + * + * Operations that can be performed on the panel. + */ + const struct drm_panel_funcs *funcs; + + /** + * @connector_type: + * + * Type of the panel as a DRM_MODE_CONNECTOR_* value. This is used to + * initialise the drm_connector corresponding to the panel with the + * correct connector type. + */ + int connector_type; + + /** + * @list: + * + * Panel entry in registry. 
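Continuing the sketch above, registering the panel from a driver's probe path then looks roughly like this:

static int my_panel_register(struct device *dev, struct my_panel *p)
{
	int ret;

	drm_panel_init(&p->base, dev, &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);

	/* Optional: pick up a "backlight" phandle from DT so the core toggles it. */
	ret = drm_panel_of_backlight(&p->base);
	if (ret)
		return ret;

	drm_panel_add(&p->base);
	return 0;
}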
+ */ + struct list_head list; +}; + +void drm_panel_init(struct drm_panel *panel, struct device *dev, + const struct drm_panel_funcs *funcs, + int connector_type); + +void drm_panel_add(struct drm_panel *panel); +void drm_panel_remove(struct drm_panel *panel); + +int drm_panel_prepare(struct drm_panel *panel); +int drm_panel_unprepare(struct drm_panel *panel); + +int drm_panel_enable(struct drm_panel *panel); +int drm_panel_disable(struct drm_panel *panel); + +int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector); + +#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) +struct drm_panel *of_drm_find_panel(const struct device_node *np); +int of_drm_get_panel_orientation(const struct device_node *np, + enum drm_panel_orientation *orientation); +#else +static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) +{ + return ERR_PTR(-ENODEV); +} + +static inline int of_drm_get_panel_orientation(const struct device_node *np, + enum drm_panel_orientation *orientation) +{ + return -ENODEV; +} +#endif + +#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ + (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) +int drm_panel_of_backlight(struct drm_panel *panel); +#else +static inline int drm_panel_of_backlight(struct drm_panel *panel) +{ + return 0; +} +#endif + +#endif diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h new file mode 100644 index 000000000..b7e899ce4 --- /dev/null +++ b/include/drm/drm_pciids.h @@ -0,0 +1,814 @@ +/* SPDX-License-Identifier: MIT */ +#define radeon_PCI_IDS \ + {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x1318, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ + {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ + {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ + {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ + {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ + {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ + {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B48, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ + {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ + {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ + {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ + {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ + {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ + {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ + {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ + {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \ + {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ + {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ + {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ + {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4d, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6703, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6704, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6705, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6706, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6707, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6708, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6709, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6718, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 
0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, 
\ + {0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + 
{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x684C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c1, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ + {0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ + 
{0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x948A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x955f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C7, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9642, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9805, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 
0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9904, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9905, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9906, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9998, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0, 0, 0} + +#define r128_PCI_IDS \ + {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define mga_PCI_IDS \ + {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ + {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ + {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \ + {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ + {0, 0, 0} + +#define sisdrv_PCI_IDS \ + {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 
0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0, 0, 0} + +#define tdfx_PCI_IDS \ + {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define viadrv_PCI_IDS \ + {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ + {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ + {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ + {0, 0, 0} + +#define i810_PCI_IDS \ + {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define savage_PCI_IDS \ + {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ + {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ + {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ + {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ + {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ + {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ + {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ + {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ + {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ + {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ + {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ + {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ + {0, 0, 0} diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h new file mode 100644 index 000000000..3f396d94a --- /dev/null +++ b/include/drm/drm_plane.h @@ -0,0 +1,865 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and 
this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_PLANE_H__ +#define __DRM_PLANE_H__ + +#include <linux/list.h> +#include <linux/ctype.h> +#include <drm/drm_mode_object.h> +#include <drm/drm_color_mgmt.h> +#include <drm/drm_rect.h> +#include <drm/drm_modeset_lock.h> +#include <drm/drm_util.h> + +struct drm_crtc; +struct drm_printer; +struct drm_modeset_acquire_ctx; + +/** + * struct drm_plane_state - mutable plane state + * + * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and + * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the + * raw coordinates provided by userspace. Drivers should use + * drm_atomic_helper_check_plane_state() and only use the derived rectangles in + * @src and @dst to program the hardware. + */ +struct drm_plane_state { + /** @plane: backpointer to the plane */ + struct drm_plane *plane; + + /** + * @crtc: + * + * Currently bound CRTC, NULL if disabled. Do not this write directly, + * use drm_atomic_set_crtc_for_plane() + */ + struct drm_crtc *crtc; + + /** + * @fb: + * + * Currently bound framebuffer. Do not write this directly, use + * drm_atomic_set_fb_for_plane() + */ + struct drm_framebuffer *fb; + + /** + * @fence: + * + * Optional fence to wait for before scanning out @fb. The core atomic + * code will set this when userspace is using explicit fencing. Do not + * write this field directly for a driver's implicit fence, use + * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is + * preserved. + * + * Drivers should store any implicit fence in this from their + * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb() + * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers. + */ + struct dma_fence *fence; + + /** + * @crtc_x: + * + * Left position of visible portion of plane on crtc, signed dest + * location allows it to be partially off screen. + */ + + int32_t crtc_x; + /** + * @crtc_y: + * + * Upper position of visible portion of plane on crtc, signed dest + * location allows it to be partially off screen. + */ + int32_t crtc_y; + + /** @crtc_w: width of visible portion of plane on crtc */ + /** @crtc_h: height of visible portion of plane on crtc */ + uint32_t crtc_w, crtc_h; + + /** + * @src_x: left position of visible portion of plane within plane (in + * 16.16 fixed point). + */ + uint32_t src_x; + /** + * @src_y: upper position of visible portion of plane within plane (in + * 16.16 fixed point). 
+ */ + uint32_t src_y; + /** @src_w: width of visible portion of plane (in 16.16) */ + /** @src_h: height of visible portion of plane (in 16.16) */ + uint32_t src_h, src_w; + + /** + * @alpha: + * Opacity of the plane with 0 as completely transparent and 0xffff as + * completely opaque. See drm_plane_create_alpha_property() for more + * details. + */ + u16 alpha; + + /** + * @pixel_blend_mode: + * The alpha blending equation selection, describing how the pixels from + * the current plane are composited with the background. Value can be + * one of DRM_MODE_BLEND_* + */ + uint16_t pixel_blend_mode; + + /** + * @rotation: + * Rotation of the plane. See drm_plane_create_rotation_property() for + * more details. + */ + unsigned int rotation; + + /** + * @zpos: + * Priority of the given plane on crtc (optional). + * + * User-space may set mutable zpos properties so that multiple active + * planes on the same CRTC have identical zpos values. This is a + * user-space bug, but drivers can solve the conflict by comparing the + * plane object IDs; the plane with a higher ID is stacked on top of a + * plane with a lower ID. + * + * See drm_plane_create_zpos_property() and + * drm_plane_create_zpos_immutable_property() for more details. + */ + unsigned int zpos; + + /** + * @normalized_zpos: + * Normalized value of zpos: unique, range from 0 to N-1 where N is the + * number of active planes for given crtc. Note that the driver must set + * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to + * update this before it can be trusted. + */ + unsigned int normalized_zpos; + + /** + * @color_encoding: + * + * Color encoding for non RGB formats + */ + enum drm_color_encoding color_encoding; + + /** + * @color_range: + * + * Color range for non RGB formats + */ + enum drm_color_range color_range; + + /** + * @fb_damage_clips: + * + * Blob representing damage (area in plane framebuffer that changed + * since last plane update) as an array of &drm_mode_rect in framebuffer + * coodinates of the attached framebuffer. Note that unlike plane src, + * damage clips are not in 16.16 fixed point. + */ + struct drm_property_blob *fb_damage_clips; + + /** + * @src: + * + * source coordinates of the plane (in 16.16). + * + * When using drm_atomic_helper_check_plane_state(), + * the coordinates are clipped, but the driver may choose + * to use unclipped coordinates instead when the hardware + * performs the clipping automatically. + */ + /** + * @dst: + * + * clipped destination coordinates of the plane. + * + * When using drm_atomic_helper_check_plane_state(), + * the coordinates are clipped, but the driver may choose + * to use unclipped coordinates instead when the hardware + * performs the clipping automatically. + */ + struct drm_rect src, dst; + + /** + * @visible: + * + * Visibility of the plane. This can be false even if fb!=NULL and + * crtc!=NULL, due to clipping. + */ + bool visible; + + /** + * @commit: Tracks the pending commit to prevent use-after-free conditions, + * and for async plane updates. + * + * May be NULL. 
+ */ + struct drm_crtc_commit *commit; + + /** @state: backpointer to global drm_atomic_state */ + struct drm_atomic_state *state; +}; + +static inline struct drm_rect +drm_plane_state_src(const struct drm_plane_state *state) +{ + struct drm_rect src = { + .x1 = state->src_x, + .y1 = state->src_y, + .x2 = state->src_x + state->src_w, + .y2 = state->src_y + state->src_h, + }; + return src; +} + +static inline struct drm_rect +drm_plane_state_dest(const struct drm_plane_state *state) +{ + struct drm_rect dest = { + .x1 = state->crtc_x, + .y1 = state->crtc_y, + .x2 = state->crtc_x + state->crtc_w, + .y2 = state->crtc_y + state->crtc_h, + }; + return dest; +} + +/** + * struct drm_plane_funcs - driver plane control functions + */ +struct drm_plane_funcs { + /** + * @update_plane: + * + * This is the legacy entry point to enable and configure the plane for + * the given CRTC and framebuffer. It is never called to disable the + * plane, i.e. the passed-in crtc and fb paramters are never NULL. + * + * The source rectangle in frame buffer memory coordinates is given by + * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point + * values). Devices that don't support subpixel plane coordinates can + * ignore the fractional part. + * + * The destination rectangle in CRTC coordinates is given by the + * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values). + * Devices scale the source rectangle to the destination rectangle. If + * scaling is not supported, and the source rectangle size doesn't match + * the destination rectangle size, the driver must return a + * -<errorname>EINVAL</errorname> error. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_update_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*update_plane)(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @disable_plane: + * + * This is the legacy entry point to disable the plane. The DRM core + * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call + * with the frame buffer ID set to 0. Disabled planes must not be + * processed by the CRTC. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_disable_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*disable_plane)(struct drm_plane *plane, + struct drm_modeset_acquire_ctx *ctx); + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a plane cannot be hotplugged + * in DRM. + */ + void (*destroy)(struct drm_plane *plane); + + /** + * @reset: + * + * Reset plane hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_plane_reset() to reset + * atomic state using this hook. + */ + void (*reset)(struct drm_plane *plane); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * plane. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. 
For atomic drivers it is not used because + * property handling is done entirely in the DRM core. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_plane *plane, + struct drm_property *property, uint64_t val); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this plane and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. + * + * This callback is mandatory for atomic drivers. + * + * Atomic drivers which don't subclass &struct drm_plane_state should use + * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before &drm_plane.state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ + struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + * + * This callback is mandatory for atomic drivers. + */ + void (*atomic_destroy_state)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_plane_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. 
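/*
 * [Editor's illustrative sketch, not part of the patch] The usual pattern
 * for @atomic_duplicate_state/@atomic_destroy_state when the plane state is
 * subclassed: copy the whole driver structure, then let the
 * __drm_atomic_helper_plane_*() helpers handle the shared base state and its
 * references.  "foo_plane_state" and its extra member are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_plane.h>

struct foo_plane_state {
	struct drm_plane_state base;
	u32 sharpness;		/* driver-private member */
};

static struct drm_plane_state *
foo_plane_atomic_duplicate_state(struct drm_plane *plane)
{
	struct foo_plane_state *fps;

	if (WARN_ON(!plane->state))
		return NULL;

	/* Copy the full subclassed state, driver-private members included. */
	fps = kmemdup(plane->state, sizeof(*fps), GFP_KERNEL);
	if (!fps)
		return NULL;

	/* Re-copies the base state and grabs the fb/commit references. */
	__drm_atomic_helper_plane_duplicate_state(plane, &fps->base);

	return &fps->base;
}

static void foo_plane_atomic_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	__drm_atomic_helper_plane_destroy_state(state);
	kfree(container_of(state, struct foo_plane_state, base));
}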
+ * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this plane). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ + int (*atomic_set_property)(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETPLANE IOCTL. + * + * Do not call this function directly, use + * drm_atomic_plane_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this plane). + */ + int (*atomic_get_property)(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val); + /** + * @late_register: + * + * This optional hook can be used to register additional userspace + * interfaces attached to the plane like debugfs interfaces. + * It is called late in the driver load sequence from drm_dev_register(). + * Everything added from this callback should be unregistered in + * the early_unregister callback. + * + * Returns: + * + * 0 on success, or a negative error code on failure. + */ + int (*late_register)(struct drm_plane *plane); + + /** + * @early_unregister: + * + * This optional hook should be used to unregister the additional + * userspace interfaces attached to the plane from + * @late_register. It is called from drm_dev_unregister(), + * early in the driver unload sequence to disable userspace access + * before data structures are torndown. + */ + void (*early_unregister)(struct drm_plane *plane); + + /** + * @atomic_print_state: + * + * If driver subclasses &struct drm_plane_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_plane_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_plane_state *state); + + /** + * @format_mod_supported: + * + * This optional hook is used for the DRM to determine if the given + * format/modifier combination is valid for the plane. This allows the + * DRM to generate the correct format bitmask (which formats apply to + * which modifier), and to valdiate modifiers at atomic_check time. + * + * If not present, then any modifier in the plane's modifier + * list is allowed with any of the plane's formats. + * + * Returns: + * + * True if the given modifier is valid for that format on the plane. + * False otherwise. + */ + bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format, + uint64_t modifier); +}; + +/** + * enum drm_plane_type - uapi plane type enumeration + * + * For historical reasons not all planes are made the same. This enumeration is + * used to tell the different types of planes apart to implement the different + * uapi semantics for them. For userspace which is universal plane aware and + * which is using that atomic IOCTL there's no difference between these planes + * (beyong what the driver and hardware can support of course). 
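/*
 * [Editor's illustrative sketch, not part of the patch] Decoding a
 * driver-private plane property in @atomic_set_property and reading it back
 * in @atomic_get_property.  The property pointer is stashed in a
 * hypothetical foo_plane wrapper; the "sharpness" property is made up.
 */
#include <linux/kernel.h>
#include <drm/drm_plane.h>
#include <drm/drm_property.h>

struct foo_plane {
	struct drm_plane base;
	struct drm_property *sharpness_property;
};

struct foo_plane_state {
	struct drm_plane_state base;
	u32 sharpness;
};

static int foo_plane_atomic_set_property(struct drm_plane *plane,
					 struct drm_plane_state *state,
					 struct drm_property *property,
					 uint64_t val)
{
	struct foo_plane *fp = container_of(plane, struct foo_plane, base);
	struct foo_plane_state *fs =
		container_of(state, struct foo_plane_state, base);

	if (property == fp->sharpness_property) {
		fs->sharpness = val;
		return 0;
	}

	/* Unknown property: the core only passes properties attached to us. */
	return -EINVAL;
}

static int foo_plane_atomic_get_property(struct drm_plane *plane,
					 const struct drm_plane_state *state,
					 struct drm_property *property,
					 uint64_t *val)
{
	struct foo_plane *fp = container_of(plane, struct foo_plane, base);
	const struct foo_plane_state *fs =
		container_of(state, struct foo_plane_state, base);

	if (property == fp->sharpness_property) {
		*val = fs->sharpness;
		return 0;
	}

	return -EINVAL;
}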
+ * + * For compatibility with legacy userspace, only overlay planes are made + * available to userspace by default. Userspace clients may set the + * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they + * wish to receive a universal plane list containing all plane types. See also + * drm_for_each_legacy_plane(). + * + * WARNING: The values of this enum is UABI since they're exposed in the "type" + * property. + */ +enum drm_plane_type { + /** + * @DRM_PLANE_TYPE_OVERLAY: + * + * Overlay planes represent all non-primary, non-cursor planes. Some + * drivers refer to these types of planes as "sprites" internally. + */ + DRM_PLANE_TYPE_OVERLAY, + + /** + * @DRM_PLANE_TYPE_PRIMARY: + * + * Primary planes represent a "main" plane for a CRTC. Primary planes + * are the planes operated upon by CRTC modesetting and flipping + * operations described in the &drm_crtc_funcs.page_flip and + * &drm_crtc_funcs.set_config hooks. + */ + DRM_PLANE_TYPE_PRIMARY, + + /** + * @DRM_PLANE_TYPE_CURSOR: + * + * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes + * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and + * DRM_IOCTL_MODE_CURSOR2 IOCTLs. + */ + DRM_PLANE_TYPE_CURSOR, +}; + + +/** + * struct drm_plane - central DRM plane control structure + * + * Planes represent the scanout hardware of a display block. They receive their + * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control + * the color conversion, see `Plane Composition Properties`_ for more details, + * and are also involved in the color conversion of input pixels, see `Color + * Management Properties`_ for details on that. + */ +struct drm_plane { + /** @dev: DRM device this plane belongs to */ + struct drm_device *dev; + + /** + * @head: + * + * List of all planes on @dev, linked from &drm_mode_config.plane_list. + * Invariant over the lifetime of @dev and therefore does not need + * locking. + */ + struct list_head head; + + /** @name: human readable name, can be overwritten by the driver */ + char *name; + + /** + * @mutex: + * + * Protects modeset plane state, together with the &drm_crtc.mutex of + * CRTC this plane is linked to (when active, getting activated or + * getting disabled). + * + * For atomic drivers specifically this protects @state. + */ + struct drm_modeset_lock mutex; + + /** @base: base mode object */ + struct drm_mode_object base; + + /** + * @possible_crtcs: pipes this plane can be bound to constructed from + * drm_crtc_mask() + */ + uint32_t possible_crtcs; + /** @format_types: array of formats supported by this plane */ + uint32_t *format_types; + /** @format_count: Size of the array pointed at by @format_types. */ + unsigned int format_count; + /** + * @format_default: driver hasn't supplied supported formats for the + * plane. Used by the drm_plane_init compatibility wrapper only. + */ + bool format_default; + + /** @modifiers: array of modifiers supported by this plane */ + uint64_t *modifiers; + /** @modifier_count: Size of the array pointed at by @modifier_count. */ + unsigned int modifier_count; + + /** + * @crtc: + * + * Currently bound CRTC, only meaningful for non-atomic drivers. For + * atomic drivers this is forced to be NULL, atomic drivers should + * instead check &drm_plane_state.crtc. + */ + struct drm_crtc *crtc; + + /** + * @fb: + * + * Currently bound framebuffer, only meaningful for non-atomic drivers. + * For atomic drivers this is forced to be NULL, atomic drivers should + * instead check &drm_plane_state.fb. 
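/*
 * [Editor's illustrative sketch, not part of the patch] A possible
 * @format_mod_supported hook matching the @format_types/@modifiers arrays
 * below: linear scanout for every format, X-tiling (an existing i915
 * modifier, used here purely as an example) only for XRGB8888.  The policy
 * itself is made up.
 */
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

static bool foo_plane_format_mod_supported(struct drm_plane *plane,
					   uint32_t format, uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	return modifier == I915_FORMAT_MOD_X_TILED &&
	       format == DRM_FORMAT_XRGB8888;
}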
+ */ + struct drm_framebuffer *fb; + + /** + * @old_fb: + * + * Temporary tracking of the old fb while a modeset is ongoing. Only + * used by non-atomic drivers, forced to be NULL for atomic drivers. + */ + struct drm_framebuffer *old_fb; + + /** @funcs: plane control functions */ + const struct drm_plane_funcs *funcs; + + /** @properties: property tracking for this plane */ + struct drm_object_properties properties; + + /** @type: Type of plane, see &enum drm_plane_type for details. */ + enum drm_plane_type type; + + /** + * @index: Position inside the mode_config.list, can be used as an array + * index. It is invariant over the lifetime of the plane. + */ + unsigned index; + + /** @helper_private: mid-layer private data */ + const struct drm_plane_helper_funcs *helper_private; + + /** + * @state: + * + * Current atomic state for this plane. + * + * This is protected by @mutex. Note that nonblocking atomic commits + * access the current plane state without taking locks. Either by going + * through the &struct drm_atomic_state pointers, see + * for_each_oldnew_plane_in_state(), for_each_old_plane_in_state() and + * for_each_new_plane_in_state(). Or through careful ordering of atomic + * commit operations as implemented in the atomic helpers, see + * &struct drm_crtc_commit. + */ + struct drm_plane_state *state; + + /** + * @alpha_property: + * Optional alpha property for this plane. See + * drm_plane_create_alpha_property(). + */ + struct drm_property *alpha_property; + /** + * @zpos_property: + * Optional zpos property for this plane. See + * drm_plane_create_zpos_property(). + */ + struct drm_property *zpos_property; + /** + * @rotation_property: + * Optional rotation property for this plane. See + * drm_plane_create_rotation_property(). + */ + struct drm_property *rotation_property; + /** + * @blend_mode_property: + * Optional "pixel blend mode" enum property for this plane. + * Blend mode property represents the alpha blending equation selection, + * describing how the pixels from the current plane are composited with + * the background. + */ + struct drm_property *blend_mode_property; + + /** + * @color_encoding_property: + * + * Optional "COLOR_ENCODING" enum property for specifying + * color encoding for non RGB formats. + * See drm_plane_create_color_properties(). + */ + struct drm_property *color_encoding_property; + /** + * @color_range_property: + * + * Optional "COLOR_RANGE" enum property for specifying + * color range for non RGB formats. + * See drm_plane_create_color_properties(). + */ + struct drm_property *color_range_property; +}; + +#define obj_to_plane(x) container_of(x, struct drm_plane, base) + +__printf(9, 10) +int drm_universal_plane_init(struct drm_device *dev, + struct drm_plane *plane, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, + unsigned int format_count, + const uint64_t *format_modifiers, + enum drm_plane_type type, + const char *name, ...); +int drm_plane_init(struct drm_device *dev, + struct drm_plane *plane, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + bool is_primary); +void drm_plane_cleanup(struct drm_plane *plane); + +/** + * drm_plane_index - find the index of a registered plane + * @plane: plane to find index for + * + * Given a registered plane, return the index of that plane within a DRM + * device's list of planes. 
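/*
 * [Editor's illustrative sketch, not part of the patch] Registering a
 * primary plane with drm_universal_plane_init() and an explicit,
 * DRM_FORMAT_MOD_INVALID-terminated modifier list, wiring the plane funcs to
 * the stock atomic helpers.  All foo_* names are hypothetical; the
 * possible_crtcs mask is usually BIT(pipe) or drm_crtc_mask().
 */
#include <linux/kernel.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

static const uint32_t foo_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static const uint64_t foo_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,		/* mandatory terminator */
};

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static int foo_create_primary_plane(struct drm_device *dev,
				    struct drm_plane *plane,
				    uint32_t possible_crtcs)
{
	return drm_universal_plane_init(dev, plane, possible_crtcs,
					&foo_plane_funcs,
					foo_formats, ARRAY_SIZE(foo_formats),
					foo_modifiers,
					DRM_PLANE_TYPE_PRIMARY, NULL);
}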
+ */ +static inline unsigned int drm_plane_index(const struct drm_plane *plane) +{ + return plane->index; +} + +/** + * drm_plane_mask - find the mask of a registered plane + * @plane: plane to find mask for + */ +static inline u32 drm_plane_mask(const struct drm_plane *plane) +{ + return 1 << drm_plane_index(plane); +} + +struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); +void drm_plane_force_disable(struct drm_plane *plane); + +int drm_mode_plane_set_obj_prop(struct drm_plane *plane, + struct drm_property *property, + uint64_t value); + +/** + * drm_plane_find - find a &drm_plane + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: plane id + * + * Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around + * drm_mode_object_find(). + */ +static inline struct drm_plane *drm_plane_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PLANE); + return mo ? obj_to_plane(mo) : NULL; +} + +/** + * drm_for_each_plane_mask - iterate over planes specified by bitmask + * @plane: the loop cursor + * @dev: the DRM device + * @plane_mask: bitmask of plane indices + * + * Iterate over all planes specified by bitmask. + */ +#define drm_for_each_plane_mask(plane, dev, plane_mask) \ + list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ + for_each_if ((plane_mask) & drm_plane_mask(plane)) + +/** + * drm_for_each_legacy_plane - iterate over all planes for legacy userspace + * @plane: the loop cursor + * @dev: the DRM device + * + * Iterate over all legacy planes of @dev, excluding primary and cursor planes. + * This is useful for implementing userspace apis when userspace is not + * universal plane aware. See also &enum drm_plane_type. + */ +#define drm_for_each_legacy_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ + for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY) + +/** + * drm_for_each_plane - iterate over all planes + * @plane: the loop cursor + * @dev: the DRM device + * + * Iterate over all planes of @dev, include primary and cursor planes. + */ +#define drm_for_each_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) + +bool drm_any_plane_has_format(struct drm_device *dev, + u32 format, u64 modifier); +/** + * drm_plane_get_damage_clips_count - Returns damage clips count. + * @state: Plane state. + * + * Simple helper to get the number of &drm_mode_rect clips set by user-space + * during plane update. + * + * Return: Number of clips in plane fb_damage_clips blob property. + */ +static inline unsigned int +drm_plane_get_damage_clips_count(const struct drm_plane_state *state) +{ + return (state && state->fb_damage_clips) ? + state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0; +} + +/** + * drm_plane_get_damage_clips - Returns damage clips. + * @state: Plane state. + * + * Note that this function returns uapi type &drm_mode_rect. Drivers might + * instead be interested in internal &drm_rect which can be obtained by calling + * drm_helper_get_plane_damage_clips(). + * + * Return: Damage clips in plane fb_damage_clips blob property. + */ +static inline struct drm_mode_rect * +drm_plane_get_damage_clips(const struct drm_plane_state *state) +{ + return (struct drm_mode_rect *)((state && state->fb_damage_clips) ? 
+ state->fb_damage_clips->data : NULL); +} + +#endif diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h new file mode 100644 index 000000000..331ebd60b --- /dev/null +++ b/include/drm/drm_plane_helper.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2011-2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_PLANE_HELPER_H +#define DRM_PLANE_HELPER_H + +#include <drm/drm_rect.h> +#include <drm/drm_crtc.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_modeset_helper.h> + +/* + * Drivers that don't allow primary plane scaling may pass this macro in place + * of the min/max scale parameters of the update checker function. + * + * Due to src being in 16.16 fixed point and dest being in integer pixels, + * 1<<16 represents no scaling. + */ +#define DRM_PLANE_HELPER_NO_SCALING (1<<16) + +void drm_primary_helper_destroy(struct drm_plane *plane); +extern const struct drm_plane_funcs drm_primary_helper_funcs; + +#endif diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h new file mode 100644 index 000000000..0f69f9fbf --- /dev/null +++ b/include/drm/drm_prime.h @@ -0,0 +1,111 @@ +/* + * Copyright © 2012 Red Hat + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Rob Clark <rob.clark@linaro.org> + * + */ + +#ifndef __DRM_PRIME_H__ +#define __DRM_PRIME_H__ + +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/scatterlist.h> + +/** + * struct drm_prime_file_private - per-file tracking for PRIME + * + * This just contains the internal &struct dma_buf and handle caches for each + * &struct drm_file used by the PRIME core code. + */ +struct drm_prime_file_private { +/* private: */ + struct mutex lock; + struct rb_root dmabufs; + struct rb_root handles; +}; + +struct device; + +struct dma_buf_export_info; +struct dma_buf; +struct dma_buf_attachment; + +enum dma_data_direction; + +struct drm_device; +struct drm_gem_object; +struct drm_file; + +/* core prime functions */ +struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, + struct dma_buf_export_info *exp_info); +void drm_gem_dmabuf_release(struct dma_buf *dma_buf); + +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle); +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, uint32_t flags, + int *prime_fd); + +/* helper functions for exporting */ +int drm_gem_map_attach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach); +void drm_gem_map_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach); +struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir); +void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir); +void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf); +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); + +int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); + +struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, unsigned int nr_pages); +struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, + int flags); + +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt); + +/* helper functions for importing */ +struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, + struct dma_buf *dma_buf, + struct device *attach_dev); +struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); + +int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, + dma_addr_t *addrs, int max_pages); + + +#endif /* __DRM_PRIME_H__ */ diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h new file mode 100644 index 000000000..f32d179e1 --- /dev/null +++ b/include/drm/drm_print.h @@ -0,0 +1,566 @@ +/* + * Copyright (C) 2016 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to 
whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + */ + +#ifndef DRM_PRINT_H_ +#define DRM_PRINT_H_ + +#include <linux/compiler.h> +#include <linux/printk.h> +#include <linux/seq_file.h> +#include <linux/device.h> +#include <linux/debugfs.h> + +#include <drm/drm.h> + +/* Do *not* use outside of drm_print.[ch]! */ +extern unsigned int __drm_debug; + +/** + * DOC: print + * + * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same + * debug code to be used for both debugfs and printk logging. + * + * For example:: + * + * void log_some_info(struct drm_printer *p) + * { + * drm_printf(p, "foo=%d\n", foo); + * drm_printf(p, "bar=%d\n", bar); + * } + * + * #ifdef CONFIG_DEBUG_FS + * void debugfs_show(struct seq_file *f) + * { + * struct drm_printer p = drm_seq_file_printer(f); + * log_some_info(&p); + * } + * #endif + * + * void some_other_function(...) + * { + * struct drm_printer p = drm_info_printer(drm->dev); + * log_some_info(&p); + * } + */ + +/** + * struct drm_printer - drm output "stream" + * + * Do not use struct members directly. Use drm_printer_seq_file(), + * drm_printer_info(), etc to initialize. And drm_printf() for output. + */ +struct drm_printer { + /* private: */ + void (*printfn)(struct drm_printer *p, struct va_format *vaf); + void (*puts)(struct drm_printer *p, const char *str); + void *arg; + const char *prefix; +}; + +void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf); +void __drm_puts_coredump(struct drm_printer *p, const char *str); +void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); +void __drm_puts_seq_file(struct drm_printer *p, const char *str); +void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf); + +__printf(2, 3) +void drm_printf(struct drm_printer *p, const char *f, ...); +void drm_puts(struct drm_printer *p, const char *str); +void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset); +void drm_print_bits(struct drm_printer *p, unsigned long value, + const char * const bits[], unsigned int nbits); + +__printf(2, 0) +/** + * drm_vprintf - print to a &drm_printer stream + * @p: the &drm_printer + * @fmt: format string + * @va: the va_list + */ +static inline void +drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) +{ + struct va_format vaf = { .fmt = fmt, .va = va }; + + p->printfn(p, &vaf); +} + +/** + * drm_printf_indent - Print to a &drm_printer stream with indentation + * @printer: DRM printer + * @indent: Tab indentation level (max 5) + * @fmt: Format string + */ +#define drm_printf_indent(printer, indent, fmt, ...) 
\ + drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) + +/** + * struct drm_print_iterator - local struct used with drm_printer_coredump + * @data: Pointer to the devcoredump output buffer + * @start: The offset within the buffer to start writing + * @remain: The number of bytes to write for this iteration + */ +struct drm_print_iterator { + void *data; + ssize_t start; + ssize_t remain; + /* private: */ + ssize_t offset; +}; + +/** + * drm_coredump_printer - construct a &drm_printer that can output to a buffer + * from the read function for devcoredump + * @iter: A pointer to a struct drm_print_iterator for the read instance + * + * This wrapper extends drm_printf() to work with a dev_coredumpm() callback + * function. The passed in drm_print_iterator struct contains the buffer + * pointer, size and offset as passed in from devcoredump. + * + * For example:: + * + * void coredump_read(char *buffer, loff_t offset, size_t count, + * void *data, size_t datalen) + * { + * struct drm_print_iterator iter; + * struct drm_printer p; + * + * iter.data = buffer; + * iter.start = offset; + * iter.remain = count; + * + * p = drm_coredump_printer(&iter); + * + * drm_printf(p, "foo=%d\n", foo); + * } + * + * void makecoredump(...) + * { + * ... + * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL, + * coredump_read, ...) + * } + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer +drm_coredump_printer(struct drm_print_iterator *iter) +{ + struct drm_printer p = { + .printfn = __drm_printfn_coredump, + .puts = __drm_puts_coredump, + .arg = iter, + }; + + /* Set the internal offset of the iterator to zero */ + iter->offset = 0; + + return p; +} + +/** + * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file + * @f: the &struct seq_file to output to + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) +{ + struct drm_printer p = { + .printfn = __drm_printfn_seq_file, + .puts = __drm_puts_seq_file, + .arg = f, + }; + return p; +} + +/** + * drm_info_printer - construct a &drm_printer that outputs to dev_printk() + * @dev: the &struct device pointer + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_info_printer(struct device *dev) +{ + struct drm_printer p = { + .printfn = __drm_printfn_info, + .arg = dev, + }; + return p; +} + +/** + * drm_debug_printer - construct a &drm_printer that outputs to pr_debug() + * @prefix: debug output prefix + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_debug_printer(const char *prefix) +{ + struct drm_printer p = { + .printfn = __drm_printfn_debug, + .prefix = prefix + }; + return p; +} + +/** + * drm_err_printer - construct a &drm_printer that outputs to pr_err() + * @prefix: debug output prefix + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_err_printer(const char *prefix) +{ + struct drm_printer p = { + .printfn = __drm_printfn_err, + .prefix = prefix + }; + return p; +} + +/** + * enum drm_debug_category - The DRM debug categories + * + * Each of the DRM debug logging macros use a specific category, and the logging + * is filtered by the drm.debug module parameter. This enum specifies the values + * for the interface. + * + * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except + * DRM_DEBUG() logs to DRM_UT_CORE. 
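/*
 * [Editor's illustrative sketch, not part of the patch] drm_printf_indent()
 * is typically used from state-dump hooks such as
 * &drm_plane_funcs.atomic_print_state, so driver-private fields line up
 * under the core output.  "foo_plane_state" and "sharpness" are hypothetical.
 */
#include <linux/kernel.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>

struct foo_plane_state {
	struct drm_plane_state base;
	u32 sharpness;
};

static void foo_plane_atomic_print_state(struct drm_printer *p,
					 const struct drm_plane_state *state)
{
	const struct foo_plane_state *fs =
		container_of(state, struct foo_plane_state, base);

	drm_printf_indent(p, 1, "sharpness=%u\n", fs->sharpness);
}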
+ * + * Enabling verbose debug messages is done through the drm.debug parameter, each + * category being enabled by a bit: + * + * - drm.debug=0x1 will enable CORE messages + * - drm.debug=0x2 will enable DRIVER messages + * - drm.debug=0x3 will enable CORE and DRIVER messages + * - ... + * - drm.debug=0x1ff will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node:: + * + * # echo 0xf > /sys/module/drm/parameters/debug + * + */ +enum drm_debug_category { + /** + * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, + * drm_memory.c, ... + */ + DRM_UT_CORE = 0x01, + /** + * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915, + * radeon, ... macro. + */ + DRM_UT_DRIVER = 0x02, + /** + * @DRM_UT_KMS: Used in the modesetting code. + */ + DRM_UT_KMS = 0x04, + /** + * @DRM_UT_PRIME: Used in the prime code. + */ + DRM_UT_PRIME = 0x08, + /** + * @DRM_UT_ATOMIC: Used in the atomic code. + */ + DRM_UT_ATOMIC = 0x10, + /** + * @DRM_UT_VBL: Used for verbose debug message in the vblank code. + */ + DRM_UT_VBL = 0x20, + /** + * @DRM_UT_STATE: Used for verbose atomic state debugging. + */ + DRM_UT_STATE = 0x40, + /** + * @DRM_UT_LEASE: Used in the lease code. + */ + DRM_UT_LEASE = 0x80, + /** + * @DRM_UT_DP: Used in the DP code. + */ + DRM_UT_DP = 0x100, + /** + * @DRM_UT_DRMRES: Used in the drm managed resources code. + */ + DRM_UT_DRMRES = 0x200, +}; + +static inline bool drm_debug_enabled(enum drm_debug_category category) +{ + return unlikely(__drm_debug & category); +} + +/* + * struct device based logging + * + * Prefer drm_device based logging over device or prink based logging. + */ + +__printf(3, 4) +void drm_dev_printk(const struct device *dev, const char *level, + const char *format, ...); +__printf(3, 4) +void drm_dev_dbg(const struct device *dev, enum drm_debug_category category, + const char *format, ...); + +/** + * DRM_DEV_ERROR() - Error output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__) + +/** + * DRM_DEV_ERROR_RATELIMITED() - Rate limited error output. + * + * @dev: device pointer + * @fmt: printf() like format string. + * + * Like DRM_ERROR() but won't flood the log. + */ +#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ +}) + +#define DRM_DEV_INFO(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) + +#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + if (!__print_once) { \ + __print_once = true; \ + DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ + } \ +}) + +/** + * DRM_DEV_DEBUG() - Debug output for generic drm code + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__) +/** + * DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) +/** + * DRM_DEV_DEBUG_KMS() - Debug output for modesetting code + * + * @dev: device pointer + * @fmt: printf() like format string. 
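/*
 * [Editor's illustrative sketch, not part of the patch] Guarding an
 * expensive dump with drm_debug_enabled() so the work is only done when the
 * corresponding drm.debug bit is set; drm_state_dump() comes from
 * drm_atomic.h.  "foo_dump_state" and the "[foo]" prefix are hypothetical.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_print.h>

static void foo_dump_state(struct drm_device *drm)
{
	/* Only walk the whole atomic state if DRM_UT_STATE is enabled. */
	if (drm_debug_enabled(DRM_UT_STATE)) {
		struct drm_printer p = drm_debug_printer("[foo]");

		drm_state_dump(drm, &p);
	}
}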
+ */ +#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \ + drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) + +/* + * struct drm_device based logging + * + * Prefer drm_device based logging over device or prink based logging. + */ + +/* Helper for struct drm_device based logging. */ +#define __drm_printk(drm, level, type, fmt, ...) \ + dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__) + + +#define drm_info(drm, fmt, ...) \ + __drm_printk((drm), info,, fmt, ##__VA_ARGS__) + +#define drm_notice(drm, fmt, ...) \ + __drm_printk((drm), notice,, fmt, ##__VA_ARGS__) + +#define drm_warn(drm, fmt, ...) \ + __drm_printk((drm), warn,, fmt, ##__VA_ARGS__) + +#define drm_err(drm, fmt, ...) \ + __drm_printk((drm), err,, "*ERROR* " fmt, ##__VA_ARGS__) + + +#define drm_info_once(drm, fmt, ...) \ + __drm_printk((drm), info, _once, fmt, ##__VA_ARGS__) + +#define drm_notice_once(drm, fmt, ...) \ + __drm_printk((drm), notice, _once, fmt, ##__VA_ARGS__) + +#define drm_warn_once(drm, fmt, ...) \ + __drm_printk((drm), warn, _once, fmt, ##__VA_ARGS__) + +#define drm_err_once(drm, fmt, ...) \ + __drm_printk((drm), err, _once, "*ERROR* " fmt, ##__VA_ARGS__) + + +#define drm_err_ratelimited(drm, fmt, ...) \ + __drm_printk((drm), err, _ratelimited, "*ERROR* " fmt, ##__VA_ARGS__) + + +#define drm_dbg_core(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__) +#define drm_dbg(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) +#define drm_dbg_kms(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) +#define drm_dbg_prime(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__) +#define drm_dbg_atomic(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) +#define drm_dbg_vbl(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__) +#define drm_dbg_state(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__) +#define drm_dbg_lease(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__) +#define drm_dbg_dp(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__) +#define drm_dbg_drmres(drm, fmt, ...) \ + drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) + + +/* + * printk based logging + * + * Prefer drm_device based logging over device or prink based logging. + */ + +__printf(2, 3) +void __drm_dbg(enum drm_debug_category category, const char *format, ...); +__printf(1, 2) +void __drm_err(const char *format, ...); + +/* Macros to make printk easier */ + +#define _DRM_PRINTK(once, level, fmt, ...) \ + printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__) + +#define DRM_INFO(fmt, ...) \ + _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE(fmt, ...) \ + _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN(fmt, ...) \ + _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) + +#define DRM_INFO_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) + +#define DRM_ERROR(fmt, ...) \ + __drm_err(fmt, ##__VA_ARGS__) + +#define DRM_ERROR_RATELIMITED(fmt, ...) \ + DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG(fmt, ...) \ + __drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_DRIVER(fmt, ...) \ + __drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_KMS(fmt, ...) 
\ + __drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_PRIME(fmt, ...) \ + __drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_ATOMIC(fmt, ...) \ + __drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_VBL(fmt, ...) \ + __drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_LEASE(fmt, ...) \ + __drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_DP(fmt, ...) \ + __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) + + +#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__); \ +}) + +/* + * struct drm_device based WARNs + * + * drm_WARN*() acts like WARN*(), but with the key difference of + * using device specific information so that we know from which device + * warning is originating from. + * + * Prefer drm_device based drm_WARN* over regular WARN* + */ + +/* Helper for struct drm_device based WARNs */ +#define drm_WARN(drm, condition, format, arg...) \ + WARN(condition, "%s %s: " format, \ + dev_driver_string((drm)->dev), \ + dev_name((drm)->dev), ## arg) + +#define drm_WARN_ONCE(drm, condition, format, arg...) \ + WARN_ONCE(condition, "%s %s: " format, \ + dev_driver_string((drm)->dev), \ + dev_name((drm)->dev), ## arg) + +#define drm_WARN_ON(drm, x) \ + drm_WARN((drm), (x), "%s", \ + "drm_WARN_ON(" __stringify(x) ")") + +#define drm_WARN_ON_ONCE(drm, x) \ + drm_WARN_ONCE((drm), (x), "%s", \ + "drm_WARN_ON_ONCE(" __stringify(x) ")") + +#endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h new file mode 100644 index 000000000..8d3ed2834 --- /dev/null +++ b/include/drm/drm_probe_helper.h @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +#ifndef __DRM_PROBE_HELPER_H__ +#define __DRM_PROBE_HELPER_H__ + +#include <linux/types.h> + +struct drm_connector; +struct drm_device; +struct drm_modeset_acquire_ctx; + +int drm_helper_probe_single_connector_modes(struct drm_connector + *connector, uint32_t maxX, + uint32_t maxY); +int drm_helper_probe_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force); +void drm_kms_helper_poll_init(struct drm_device *dev); +void drm_kms_helper_poll_fini(struct drm_device *dev); +bool drm_helper_hpd_irq_event(struct drm_device *dev); +void drm_kms_helper_hotplug_event(struct drm_device *dev); + +void drm_kms_helper_poll_disable(struct drm_device *dev); +void drm_kms_helper_poll_enable(struct drm_device *dev); +bool drm_kms_helper_is_poll_worker(void); + +#endif diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h new file mode 100644 index 000000000..4a0a80d65 --- /dev/null +++ b/include/drm/drm_property.h @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
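/*
 * [Editor's illustrative sketch, not part of the patch] Feeding a hotplug
 * interrupt into the probe helpers declared in drm_probe_helper.h above.
 * drm_helper_hpd_irq_event() re-detects HPD-capable connectors and sends the
 * hotplug uevent if their status changed; it may sleep, so this is meant as
 * the threaded half of request_threaded_irq().  "foo_device" is hypothetical.
 */
#include <linux/interrupt.h>
#include <drm/drm_probe_helper.h>

struct foo_device {
	struct drm_device *drm;
};

static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
{
	struct foo_device *foo = arg;

	drm_helper_hpd_irq_event(foo->drm);

	return IRQ_HANDLED;
}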
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef __DRM_PROPERTY_H__ +#define __DRM_PROPERTY_H__ + +#include <linux/list.h> +#include <linux/ctype.h> +#include <drm/drm_mode_object.h> + +#include <uapi/drm/drm_mode.h> + +/** + * struct drm_property_enum - symbolic values for enumerations + * @value: numeric property value for this enum entry + * @head: list of enum values, linked to &drm_property.enum_list + * @name: symbolic name for the enum + * + * For enumeration and bitmask properties this structure stores the symbolic + * decoding for each value. This is used for example for the rotation property. + */ +struct drm_property_enum { + uint64_t value; + struct list_head head; + char name[DRM_PROP_NAME_LEN]; +}; + +/** + * struct drm_property - modeset object property + * + * This structure represent a modeset object property. It combines both the name + * of the property with the set of permissible values. This means that when a + * driver wants to use a property with the same name on different objects, but + * with different value ranges, then it must create property for each one. An + * example would be rotation of &drm_plane, when e.g. the primary plane cannot + * be rotated. But if both the name and the value range match, then the same + * property structure can be instantiated multiple times for the same object. + * Userspace must be able to cope with this and cannot assume that the same + * symbolic property will have the same modeset object ID on all modeset + * objects. + * + * Properties are created by one of the special functions, as explained in + * detail in the @flags structure member. + * + * To actually expose a property it must be attached to each object using + * drm_object_attach_property(). Currently properties can only be attached to + * &drm_connector, &drm_crtc and &drm_plane. + * + * Properties are also used as the generic metadatatransport for the atomic + * IOCTL. Everything that was set directly in structures in the legacy modeset + * IOCTLs (like the plane source or destination windows, or e.g. the links to + * the CRTC) is exposed as a property with the DRM_MODE_PROP_ATOMIC flag set. + */ +struct drm_property { + /** + * @head: per-device list of properties, for cleanup. + */ + struct list_head head; + + /** + * @base: base KMS object + */ + struct drm_mode_object base; + + /** + * @flags: + * + * Property flags and type. A property needs to be one of the following + * types: + * + * DRM_MODE_PROP_RANGE + * Range properties report their minimum and maximum admissible unsigned values. + * The KMS core verifies that values set by application fit in that + * range. The range is unsigned. Range properties are created using + * drm_property_create_range(). + * + * DRM_MODE_PROP_SIGNED_RANGE + * Range properties report their minimum and maximum admissible unsigned values. + * The KMS core verifies that values set by application fit in that + * range. The range is signed. Range properties are created using + * drm_property_create_signed_range(). 
+ * + * DRM_MODE_PROP_ENUM + * Enumerated properties take a numerical value that ranges from 0 to + * the number of enumerated values defined by the property minus one, + * and associate a free-formed string name to each value. Applications + * can retrieve the list of defined value-name pairs and use the + * numerical value to get and set property instance values. Enum + * properties are created using drm_property_create_enum(). + * + * DRM_MODE_PROP_BITMASK + * Bitmask properties are enumeration properties that additionally + * restrict all enumerated values to the 0..63 range. Bitmask property + * instance values combine one or more of the enumerated bits defined + * by the property. Bitmask properties are created using + * drm_property_create_bitmask(). + * + * DRM_MODE_PROB_OBJECT + * Object properties are used to link modeset objects. This is used + * extensively in the atomic support to create the display pipeline, + * by linking &drm_framebuffer to &drm_plane, &drm_plane to + * &drm_crtc and &drm_connector to &drm_crtc. An object property can + * only link to a specific type of &drm_mode_object, this limit is + * enforced by the core. Object properties are created using + * drm_property_create_object(). + * + * Object properties work like blob properties, but in a more + * general fashion. They are limited to atomic drivers and must have + * the DRM_MODE_PROP_ATOMIC flag set. + * + * DRM_MODE_PROP_BLOB + * Blob properties store a binary blob without any format restriction. + * The binary blobs are created as KMS standalone objects, and blob + * property instance values store the ID of their associated blob + * object. Blob properties are created by calling + * drm_property_create() with DRM_MODE_PROP_BLOB as the type. + * + * Actual blob objects to contain blob data are created using + * drm_property_create_blob(), or through the corresponding IOCTL. + * + * Besides the built-in limit to only accept blob objects blob + * properties work exactly like object properties. The only reasons + * blob properties exist is backwards compatibility with existing + * userspace. + * + * In addition a property can have any combination of the below flags: + * + * DRM_MODE_PROP_ATOMIC + * Set for properties which encode atomic modeset state. Such + * properties are not exposed to legacy userspace. + * + * DRM_MODE_PROP_IMMUTABLE + * Set for properties whose values cannot be changed by + * userspace. The kernel is allowed to update the value of these + * properties. This is generally used to expose probe state to + * userspace, e.g. the EDID, or the connector path property on DP + * MST sinks. Kernel can update the value of an immutable property + * by calling drm_object_property_set_value(). + */ + uint32_t flags; + + /** + * @name: symbolic name of the properties + */ + char name[DRM_PROP_NAME_LEN]; + + /** + * @num_values: size of the @values array. + */ + uint32_t num_values; + + /** + * @values: + * + * Array with limits and values for the property. The + * interpretation of these limits is dependent upon the type per @flags. + */ + uint64_t *values; + + /** + * @dev: DRM device + */ + struct drm_device *dev; + + /** + * @enum_list: + * + * List of &drm_prop_enum_list structures with the symbolic names for + * enum and bitmask values. + */ + struct list_head enum_list; +}; + +/** + * struct drm_property_blob - Blob data for &drm_property + * @base: base KMS object + * @dev: DRM device + * @head_global: entry on the global blob list in + * &drm_mode_config.property_blob_list. 
+ * @head_file: entry on the per-file blob list in &drm_file.blobs list. + * @length: size of the blob in bytes, invariant over the lifetime of the object + * @data: actual data, embedded at the end of this structure + * + * Blobs are used to store bigger values than what fits directly into the 64 + * bits available for a &drm_property. + * + * Blobs are reference counted using drm_property_blob_get() and + * drm_property_blob_put(). They are created using drm_property_create_blob(). + */ +struct drm_property_blob { + struct drm_mode_object base; + struct drm_device *dev; + struct list_head head_global; + struct list_head head_file; + size_t length; + void *data; +}; + +struct drm_prop_enum_list { + int type; + const char *name; +}; + +#define obj_to_property(x) container_of(x, struct drm_property, base) +#define obj_to_blob(x) container_of(x, struct drm_property_blob, base) + +/** + * drm_property_type_is - check the type of a property + * @property: property to check + * @type: property type to compare with + * + * This is a helper function becauase the uapi encoding of property types is + * a bit special for historical reasons. + */ +static inline bool drm_property_type_is(struct drm_property *property, + uint32_t type) +{ + /* instanceof for props.. handles extended type vs original types: */ + if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) + return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type; + return property->flags & type; +} + +struct drm_property *drm_property_create(struct drm_device *dev, + u32 flags, const char *name, + int num_values); +struct drm_property *drm_property_create_enum(struct drm_device *dev, + u32 flags, const char *name, + const struct drm_prop_enum_list *props, + int num_values); +struct drm_property *drm_property_create_bitmask(struct drm_device *dev, + u32 flags, const char *name, + const struct drm_prop_enum_list *props, + int num_props, + uint64_t supported_bits); +struct drm_property *drm_property_create_range(struct drm_device *dev, + u32 flags, const char *name, + uint64_t min, uint64_t max); +struct drm_property *drm_property_create_signed_range(struct drm_device *dev, + u32 flags, const char *name, + int64_t min, int64_t max); +struct drm_property *drm_property_create_object(struct drm_device *dev, + u32 flags, const char *name, + uint32_t type); +struct drm_property *drm_property_create_bool(struct drm_device *dev, + u32 flags, const char *name); +int drm_property_add_enum(struct drm_property *property, + uint64_t value, const char *name); +void drm_property_destroy(struct drm_device *dev, struct drm_property *property); + +struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, + size_t length, + const void *data); +struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev, + uint32_t id); +int drm_property_replace_global_blob(struct drm_device *dev, + struct drm_property_blob **replace, + size_t length, + const void *data, + struct drm_mode_object *obj_holds_id, + struct drm_property *prop_holds_id); +bool drm_property_replace_blob(struct drm_property_blob **blob, + struct drm_property_blob *new_blob); +struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob); +void drm_property_blob_put(struct drm_property_blob *blob); + +/** + * drm_property_find - find property object + * @dev: DRM device + * @file_priv: drm file to check for lease against. + * @id: property object id + * + * This function looks up the property object specified by id and returns it. 
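/*
 * [Editor's illustrative sketch, not part of the patch] Creating a
 * driver-private range property with the constructors above and attaching it
 * to a plane; drm_object_attach_property() is declared in drm_mode_object.h.
 * The "sharpness" property is hypothetical.
 */
#include <drm/drm_mode_object.h>
#include <drm/drm_plane.h>
#include <drm/drm_property.h>

static int foo_plane_create_sharpness_property(struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_property_create_range(plane->dev, 0, "sharpness", 0, 255);
	if (!prop)
		return -ENOMEM;

	/* Expose the property on this plane, with 0 as the initial value. */
	drm_object_attach_property(&plane->base, prop, 0);

	return 0;
}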
+ */ +static inline struct drm_property *drm_property_find(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id) +{ + struct drm_mode_object *mo; + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PROPERTY); + return mo ? obj_to_property(mo) : NULL; +} + +#endif diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h new file mode 100644 index 000000000..e7f4d24cd --- /dev/null +++ b/include/drm/drm_rect.h @@ -0,0 +1,227 @@ +/* + * Copyright (C) 2011-2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef DRM_RECT_H +#define DRM_RECT_H + +#include <linux/types.h> + +/** + * DOC: rect utils + * + * Utility functions to help manage rectangular areas for + * clipping, scaling, etc. calculations. + */ + +/** + * struct drm_rect - two dimensional rectangle + * @x1: horizontal starting coordinate (inclusive) + * @x2: horizontal ending coordinate (exclusive) + * @y1: vertical starting coordinate (inclusive) + * @y2: vertical ending coordinate (exclusive) + */ +struct drm_rect { + int x1, y1, x2, y2; +}; + +/** + * DRM_RECT_FMT - printf string for &struct drm_rect + */ +#define DRM_RECT_FMT "%dx%d%+d%+d" +/** + * DRM_RECT_ARG - printf arguments for &struct drm_rect + * @r: rectangle struct + */ +#define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1 + +/** + * DRM_RECT_FP_FMT - printf string for &struct drm_rect in 16.16 fixed point + */ +#define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u" +/** + * DRM_RECT_FP_ARG - printf arguments for &struct drm_rect in 16.16 fixed point + * @r: rectangle struct + * + * This is useful for e.g. printing plane source rectangles, which are in 16.16 + * fixed point. 
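/*
 * [Editor's illustrative sketch, not part of the patch] Printing a plane's
 * 16.16 fixed-point source rectangle and integer destination rectangle with
 * the DRM_RECT_* format helpers, using drm_plane_state_src() and
 * drm_plane_state_dest() from drm_plane.h above.  "foo_log_plane_rects" is
 * hypothetical.
 */
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>

static void foo_log_plane_rects(struct drm_printer *p,
				const struct drm_plane_state *state)
{
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dst = drm_plane_state_dest(state);

	drm_printf(p, "src " DRM_RECT_FP_FMT " -> dst " DRM_RECT_FMT "\n",
		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst));
}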
+ */ +#define DRM_RECT_FP_ARG(r) \ + drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \ + drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \ + (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \ + (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10 + +/** + * drm_rect_init - initialize the rectangle from x/y/w/h + * @r: rectangle + * @x: x coordinate + * @y: y coordinate + * @width: width + * @height: height + */ +static inline void drm_rect_init(struct drm_rect *r, int x, int y, + int width, int height) +{ + r->x1 = x; + r->y1 = y; + r->x2 = x + width; + r->y2 = y + height; +} + +/** + * drm_rect_adjust_size - adjust the size of the rectangle + * @r: rectangle to be adjusted + * @dw: horizontal adjustment + * @dh: vertical adjustment + * + * Change the size of rectangle @r by @dw in the horizontal direction, + * and by @dh in the vertical direction, while keeping the center + * of @r stationary. + * + * Positive @dw and @dh increase the size, negative values decrease it. + */ +static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh) +{ + r->x1 -= dw >> 1; + r->y1 -= dh >> 1; + r->x2 += (dw + 1) >> 1; + r->y2 += (dh + 1) >> 1; +} + +/** + * drm_rect_translate - translate the rectangle + * @r: rectangle to be tranlated + * @dx: horizontal translation + * @dy: vertical translation + * + * Move rectangle @r by @dx in the horizontal direction, + * and by @dy in the vertical direction. + */ +static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy) +{ + r->x1 += dx; + r->y1 += dy; + r->x2 += dx; + r->y2 += dy; +} + +/** + * drm_rect_translate_to - translate the rectangle to an absolute position + * @r: rectangle to be tranlated + * @x: horizontal position + * @y: vertical position + * + * Move rectangle @r to @x in the horizontal direction, + * and to @y in the vertical direction. + */ +static inline void drm_rect_translate_to(struct drm_rect *r, int x, int y) +{ + drm_rect_translate(r, x - r->x1, y - r->y1); +} + +/** + * drm_rect_downscale - downscale a rectangle + * @r: rectangle to be downscaled + * @horz: horizontal downscale factor + * @vert: vertical downscale factor + * + * Divide the coordinates of rectangle @r by @horz and @vert. + */ +static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert) +{ + r->x1 /= horz; + r->y1 /= vert; + r->x2 /= horz; + r->y2 /= vert; +} + +/** + * drm_rect_width - determine the rectangle width + * @r: rectangle whose width is returned + * + * RETURNS: + * The width of the rectangle. + */ +static inline int drm_rect_width(const struct drm_rect *r) +{ + return r->x2 - r->x1; +} + +/** + * drm_rect_height - determine the rectangle height + * @r: rectangle whose height is returned + * + * RETURNS: + * The height of the rectangle. + */ +static inline int drm_rect_height(const struct drm_rect *r) +{ + return r->y2 - r->y1; +} + +/** + * drm_rect_visible - determine if the rectangle is visible + * @r: rectangle whose visibility is returned + * + * RETURNS: + * %true if the rectangle is visible, %false otherwise. + */ +static inline bool drm_rect_visible(const struct drm_rect *r) +{ + return drm_rect_width(r) > 0 && drm_rect_height(r) > 0; +} + +/** + * drm_rect_equals - determine if two rectangles are equal + * @r1: first rectangle + * @r2: second rectangle + * + * RETURNS: + * %true if the rectangles are equal, %false otherwise. 
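/*
 * [Editor's illustrative sketch, not part of the patch] Clipping a
 * destination rectangle against the CRTC bounds with drm_rect_init() and
 * drm_rect_intersect(), and reporting whether anything stays visible.
 * "foo_clip_to_crtc" is hypothetical.
 */
#include <drm/drm_rect.h>

static bool foo_clip_to_crtc(struct drm_rect *dst, int crtc_w, int crtc_h)
{
	struct drm_rect bounds;

	drm_rect_init(&bounds, 0, 0, crtc_w, crtc_h);

	/* drm_rect_intersect() shrinks @dst in place and returns visibility. */
	return drm_rect_intersect(dst, &bounds);
}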
+ */ +static inline bool drm_rect_equals(const struct drm_rect *r1, + const struct drm_rect *r2) +{ + return r1->x1 == r2->x1 && r1->x2 == r2->x2 && + r1->y1 == r2->y1 && r1->y2 == r2->y2; +} + +bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); +bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, + const struct drm_rect *clip); +int drm_rect_calc_hscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_hscale, int max_hscale); +int drm_rect_calc_vscale(const struct drm_rect *src, + const struct drm_rect *dst, + int min_vscale, int max_vscale); +void drm_rect_debug_print(const char *prefix, + const struct drm_rect *r, bool fixed_point); +void drm_rect_rotate(struct drm_rect *r, + int width, int height, + unsigned int rotation); +void drm_rect_rotate_inv(struct drm_rect *r, + int width, int height, + unsigned int rotation); + +#endif diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h new file mode 100644 index 000000000..6a483533a --- /dev/null +++ b/include/drm/drm_scdc_helper.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2015 NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef DRM_SCDC_HELPER_H
+#define DRM_SCDC_HELPER_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#define SCDC_SINK_VERSION 0x01
+
+#define SCDC_SOURCE_VERSION 0x02
+
+#define SCDC_UPDATE_0 0x10
+#define SCDC_READ_REQUEST_TEST (1 << 2)
+#define SCDC_CED_UPDATE (1 << 1)
+#define SCDC_STATUS_UPDATE (1 << 0)
+
+#define SCDC_UPDATE_1 0x11
+
+#define SCDC_TMDS_CONFIG 0x20
+#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 (1 << 1)
+#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_10 (0 << 1)
+#define SCDC_SCRAMBLING_ENABLE (1 << 0)
+
+#define SCDC_SCRAMBLER_STATUS 0x21
+#define SCDC_SCRAMBLING_STATUS (1 << 0)
+
+#define SCDC_CONFIG_0 0x30
+#define SCDC_READ_REQUEST_ENABLE (1 << 0)
+
+#define SCDC_STATUS_FLAGS_0 0x40
+#define SCDC_CH2_LOCK (1 << 3)
+#define SCDC_CH1_LOCK (1 << 2)
+#define SCDC_CH0_LOCK (1 << 1)
+#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK)
+#define SCDC_CLOCK_DETECT (1 << 0)
+
+#define SCDC_STATUS_FLAGS_1 0x41
+
+#define SCDC_ERR_DET_0_L 0x50
+#define SCDC_ERR_DET_0_H 0x51
+#define SCDC_ERR_DET_1_L 0x52
+#define SCDC_ERR_DET_1_H 0x53
+#define SCDC_ERR_DET_2_L 0x54
+#define SCDC_ERR_DET_2_H 0x55
+#define SCDC_CHANNEL_VALID (1 << 7)
+
+#define SCDC_ERR_DET_CHECKSUM 0x56
+
+#define SCDC_TEST_CONFIG_0 0xc0
+#define SCDC_TEST_READ_REQUEST (1 << 7)
+#define SCDC_TEST_READ_REQUEST_DELAY(x) ((x) & 0x7f)
+
+#define SCDC_MANUFACTURER_IEEE_OUI 0xd0
+#define SCDC_MANUFACTURER_IEEE_OUI_SIZE 3
+
+#define SCDC_DEVICE_ID 0xd3
+#define SCDC_DEVICE_ID_SIZE 8
+
+#define SCDC_DEVICE_HARDWARE_REVISION 0xdb
+#define SCDC_GET_DEVICE_HARDWARE_REVISION_MAJOR(x) (((x) >> 4) & 0xf)
+#define SCDC_GET_DEVICE_HARDWARE_REVISION_MINOR(x) (((x) >> 0) & 0xf)
+
+#define SCDC_DEVICE_SOFTWARE_MAJOR_REVISION 0xdc
+#define SCDC_DEVICE_SOFTWARE_MINOR_REVISION 0xdd
+
+#define SCDC_MANUFACTURER_SPECIFIC 0xde
+#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34
+
+ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
+                      size_t size);
+ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
+                       const void *buffer, size_t size);
+
+/**
+ * drm_scdc_readb - read a single byte from SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to read
+ * @value: return location for the register value
+ *
+ * Reads a single byte from SCDC. This is a convenience wrapper around the
+ * drm_scdc_read() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
+                                 u8 *value)
+{
+        return drm_scdc_read(adapter, offset, value, sizeof(*value));
+}
+
+/**
+ * drm_scdc_writeb - write a single byte to SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to write
+ * @value: value to write to the register
+ *
+ * Writes a single byte to SCDC. This is a convenience wrapper around the
+ * drm_scdc_write() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
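+ *
+ * As a hedged example (not from the original file), a source could switch
+ * the sink to the 1/40 TMDS bit clock ratio with scrambling enabled by
+ * writing the SCDC_TMDS_CONFIG register, where @adapter is assumed to be
+ * the connector's DDC &i2c_adapter::
+ *
+ *        ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG,
+ *                              SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 |
+ *                              SCDC_SCRAMBLING_ENABLE);
+ *        if (ret < 0)
+ *                return ret;
+ *
+ * In practice drivers would normally use the drm_scdc_set_scrambling() and
+ * drm_scdc_set_high_tmds_clock_ratio() helpers declared below rather than
+ * writing the register directly.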
+ */ +static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset, + u8 value) +{ + return drm_scdc_write(adapter, offset, &value, sizeof(value)); +} + +bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter); + +bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable); +bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set); +#endif diff --git a/include/drm/drm_self_refresh_helper.h b/include/drm/drm_self_refresh_helper.h new file mode 100644 index 000000000..520235c20 --- /dev/null +++ b/include/drm/drm_self_refresh_helper.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2019 Google, Inc. + * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ +#ifndef DRM_SELF_REFRESH_HELPER_H_ +#define DRM_SELF_REFRESH_HELPER_H_ + +struct drm_atomic_state; +struct drm_crtc; + +void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state); +void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, + unsigned int commit_time_ms, + unsigned int new_self_refresh_mask); + +int drm_self_refresh_helper_init(struct drm_crtc *crtc); +void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc); +#endif diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h new file mode 100644 index 000000000..a02637546 --- /dev/null +++ b/include/drm/drm_simple_kms_helper.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2016 Noralf Trønnes + */ + +#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H +#define __LINUX_DRM_SIMPLE_KMS_HELPER_H + +#include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> +#include <drm/drm_plane.h> + +struct drm_simple_display_pipe; + +/** + * struct drm_simple_display_pipe_funcs - helper operations for a simple + * display pipeline + */ +struct drm_simple_display_pipe_funcs { + /** + * @mode_valid: + * + * This callback is used to check if a specific mode is valid in the + * crtc used in this simple display pipe. This should be implemented + * if the display pipe has some sort of restriction in the modes + * it can display. For example, a given display pipe may be responsible + * to set a clock value. If the clock can not produce all the values + * for the available modes then this callback can be used to restrict + * the number of modes to only the ones that can be displayed. Another + * reason can be bandwidth mitigation: the memory port on the display + * controller can have bandwidth limitations not allowing pixel data + * to be fetched at any rate. + * + * This hook is used by the probe helpers to filter the mode list in + * drm_helper_probe_single_connector_modes(), and it is used by the + * atomic helpers to validate modes supplied by userspace in + * drm_atomic_helper_check_modeset(). + * + * This function is optional. + * + * NOTE: + * + * Since this function is both called from the check phase of an atomic + * commit, and the mode validation in the probe paths it is not allowed + * to look at anything else but the passed-in mode, and validate it + * against configuration-invariant hardware constraints. + * + * RETURNS: + * + * drm_mode_status Enum + */ + enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode); + + /** + * @enable: + * + * This function should be used to enable the pipeline. + * It is called when the underlying crtc is enabled. + * This hook is optional. 
+ */ + void (*enable)(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state); + /** + * @disable: + * + * This function should be used to disable the pipeline. + * It is called when the underlying crtc is disabled. + * This hook is optional. + */ + void (*disable)(struct drm_simple_display_pipe *pipe); + + /** + * @check: + * + * This function is called in the check phase of an atomic update, + * specifically when the underlying plane is checked. + * The simple display pipeline helpers already check that the plane is + * not scaled, fills the entire visible area and is always enabled + * when the crtc is also enabled. + * This hook is optional. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*check)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state, + struct drm_crtc_state *crtc_state); + /** + * @update: + * + * This function is called when the underlying plane state is updated. + * This hook is optional. + * + * This is the function drivers should submit the + * &drm_pending_vblank_event from. Using either + * drm_crtc_arm_vblank_event(), when the driver supports vblank + * interrupt handling, or drm_crtc_send_vblank_event() for more + * complex case. In case the hardware lacks vblank support entirely, + * drivers can set &struct drm_crtc_state.no_vblank in + * &struct drm_simple_display_pipe_funcs.check and let DRM's + * atomic helper fake a vblank event. + */ + void (*update)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_plane_state); + + /** + * @prepare_fb: + * + * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read + * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for + * more details. + * + * Drivers which always have their buffers pinned should use + * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook. + */ + int (*prepare_fb)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + + /** + * @cleanup_fb: + * + * Optional, called by &drm_plane_helper_funcs.cleanup_fb. Please read + * the documentation for the &drm_plane_helper_funcs.cleanup_fb hook for + * more details. + */ + void (*cleanup_fb)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + + /** + * @enable_vblank: + * + * Optional, called by &drm_crtc_funcs.enable_vblank. Please read + * the documentation for the &drm_crtc_funcs.enable_vblank hook for + * more details. + */ + int (*enable_vblank)(struct drm_simple_display_pipe *pipe); + + /** + * @disable_vblank: + * + * Optional, called by &drm_crtc_funcs.disable_vblank. Please read + * the documentation for the &drm_crtc_funcs.disable_vblank hook for + * more details. + */ + void (*disable_vblank)(struct drm_simple_display_pipe *pipe); +}; + +/** + * struct drm_simple_display_pipe - simple display pipeline + * @crtc: CRTC control structure + * @plane: Plane control structure + * @encoder: Encoder control structure + * @connector: Connector control structure + * @funcs: Pipeline control functions (optional) + * + * Simple display pipeline with plane, crtc and encoder collapsed into one + * entity. It should be initialized by calling drm_simple_display_pipe_init(). 
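+ *
+ * A minimal initialization sketch (the foo_* names and the surrounding
+ * driver structure are hypothetical, shown only for illustration)::
+ *
+ *        static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
+ *                .enable = foo_pipe_enable,
+ *                .disable = foo_pipe_disable,
+ *                .update = foo_pipe_update,
+ *        };
+ *
+ *        static const u32 foo_formats[] = { DRM_FORMAT_XRGB8888 };
+ *
+ *        ret = drm_simple_display_pipe_init(drm, &foo->pipe, &foo_pipe_funcs,
+ *                                           foo_formats,
+ *                                           ARRAY_SIZE(foo_formats),
+ *                                           NULL, &foo->connector);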
+ */ +struct drm_simple_display_pipe { + struct drm_crtc crtc; + struct drm_plane plane; + struct drm_encoder encoder; + struct drm_connector *connector; + + const struct drm_simple_display_pipe_funcs *funcs; +}; + +int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, + struct drm_bridge *bridge); + +int drm_simple_display_pipe_init(struct drm_device *dev, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const uint64_t *format_modifiers, + struct drm_connector *connector); + +int drm_simple_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + int encoder_type); + +#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */ diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h new file mode 100644 index 000000000..6cf7243a1 --- /dev/null +++ b/include/drm/drm_syncobj.h @@ -0,0 +1,132 @@ +/* + * Copyright © 2017 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * + */ +#ifndef __DRM_SYNCOBJ_H__ +#define __DRM_SYNCOBJ_H__ + +#include <linux/dma-fence.h> +#include <linux/dma-fence-chain.h> + +struct drm_file; + +/** + * struct drm_syncobj - sync object. + * + * This structure defines a generic sync object which wraps a &dma_fence. + */ +struct drm_syncobj { + /** + * @refcount: Reference count of this object. + */ + struct kref refcount; + /** + * @fence: + * NULL or a pointer to the fence bound to this object. + * + * This field should not be used directly. Use drm_syncobj_fence_get() + * and drm_syncobj_replace_fence() instead. + */ + struct dma_fence __rcu *fence; + /** + * @cb_list: List of callbacks to call when the &fence gets replaced. + */ + struct list_head cb_list; + /** + * @lock: Protects &cb_list and write-locks &fence. + */ + spinlock_t lock; + /** + * @file: A file backing for this syncobj. + */ + struct file *file; +}; + +void drm_syncobj_free(struct kref *kref); + +/** + * drm_syncobj_get - acquire a syncobj reference + * @obj: sync object + * + * This acquires an additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. + */ +static inline void +drm_syncobj_get(struct drm_syncobj *obj) +{ + kref_get(&obj->refcount); +} + +/** + * drm_syncobj_put - release a reference to a sync object. + * @obj: sync object. 
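+ *
+ * Illustrative sketch (ioctl-style usage with assumed argument names, not
+ * from the original header): look up a syncobj by handle, update it, then
+ * drop the reference taken by the lookup::
+ *
+ *        struct drm_syncobj *syncobj;
+ *
+ *        syncobj = drm_syncobj_find(file_private, args->handle);
+ *        if (!syncobj)
+ *                return -ENOENT;
+ *        drm_syncobj_replace_fence(syncobj, fence);
+ *        drm_syncobj_put(syncobj);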
+ */ +static inline void +drm_syncobj_put(struct drm_syncobj *obj) +{ + kref_put(&obj->refcount, drm_syncobj_free); +} + +/** + * drm_syncobj_fence_get - get a reference to a fence in a sync object + * @syncobj: sync object. + * + * This acquires additional reference to &drm_syncobj.fence contained in @obj, + * if not NULL. It is illegal to call this without already holding a reference. + * No locks required. + * + * Returns: + * Either the fence of @obj or NULL if there's none. + */ +static inline struct dma_fence * +drm_syncobj_fence_get(struct drm_syncobj *syncobj) +{ + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&syncobj->fence); + rcu_read_unlock(); + + return fence; +} + +struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, + u32 handle); +void drm_syncobj_add_point(struct drm_syncobj *syncobj, + struct dma_fence_chain *chain, + struct dma_fence *fence, + uint64_t point); +void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, + struct dma_fence *fence); +int drm_syncobj_find_fence(struct drm_file *file_private, + u32 handle, u64 point, u64 flags, + struct dma_fence **fence); +void drm_syncobj_free(struct kref *kref); +int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, + struct dma_fence *fence); +int drm_syncobj_get_handle(struct drm_file *file_private, + struct drm_syncobj *syncobj, u32 *handle); +int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd); + +#endif diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h new file mode 100644 index 000000000..d454ef617 --- /dev/null +++ b/include/drm/drm_sysfs.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DRM_SYSFS_H_ +#define _DRM_SYSFS_H_ + +struct drm_device; +struct device; +struct drm_connector; +struct drm_property; + +int drm_class_device_register(struct device *dev); +void drm_class_device_unregister(struct device *dev); + +void drm_sysfs_hotplug_event(struct drm_device *dev); +void drm_sysfs_connector_status_event(struct drm_connector *connector, + struct drm_property *property); +#endif diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h new file mode 100644 index 000000000..79952d8c4 --- /dev/null +++ b/include/drm/drm_util.h @@ -0,0 +1,83 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 2018 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DRM_UTIL_H_ +#define _DRM_UTIL_H_ + +/** + * DOC: drm utils + * + * Macros and inline functions that does not naturally belong in other places + */ + +#include <linux/interrupt.h> +#include <linux/kgdb.h> +#include <linux/preempt.h> +#include <linux/smp.h> + +/* + * Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall + * only be visible for drmselftests. + */ +#if defined(CONFIG_DRM_EXPORT_FOR_TESTS) +#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x) +#else +#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) +#endif + +/** + * for_each_if - helper for handling conditionals in various for_each macros + * @condition: The condition to check + * + * Typical use:: + * + * #define for_each_foo_bar(x, y) \' + * list_for_each_entry(x, y->list, head) \' + * for_each_if(x->something == SOMETHING) + * + * The for_each_if() macro makes the use of for_each_foo_bar() less error + * prone. + */ +#define for_each_if(condition) if (!(condition)) {} else + +/** + * drm_can_sleep - returns true if currently okay to sleep + * + * This function shall not be used in new code. + * The check for running in atomic context may not work - see linux/preempt.h. + * + * FIXME: All users of drm_can_sleep should be removed (see todo.rst) + * + * Returns: + * False if kgdb is active, we are in atomic context or irqs are disabled. + */ +static inline bool drm_can_sleep(void) +{ + if (in_atomic() || in_dbg_master() || irqs_disabled()) + return false; + return true; +} + +#endif diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h new file mode 100644 index 000000000..70775748d --- /dev/null +++ b/include/drm/drm_utils.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Function prototypes for misc. drm utility functions. + * Specifically this file is for function prototypes for functions which + * may also be used outside of drm code (e.g. in fbdev drivers). + * + * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_UTILS_H__ +#define __DRM_UTILS_H__ + +#include <linux/types.h> + +int drm_get_panel_orientation_quirk(int width, int height); + +signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec); + +#endif diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h new file mode 100644 index 000000000..dd125f8c7 --- /dev/null +++ b/include/drm/drm_vblank.h @@ -0,0 +1,281 @@ +/* + * Copyright 2016 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DRM_VBLANK_H_ +#define _DRM_VBLANK_H_ + +#include <linux/seqlock.h> +#include <linux/idr.h> +#include <linux/poll.h> +#include <linux/kthread.h> + +#include <drm/drm_file.h> +#include <drm/drm_modes.h> + +struct drm_device; +struct drm_crtc; +struct drm_vblank_work; + +/** + * struct drm_pending_vblank_event - pending vblank event tracking + */ +struct drm_pending_vblank_event { + /** + * @base: Base structure for tracking pending DRM events. + */ + struct drm_pending_event base; + /** + * @pipe: drm_crtc_index() of the &drm_crtc this event is for. + */ + unsigned int pipe; + /** + * @sequence: frame event should be triggered at + */ + u64 sequence; + /** + * @event: Actual event which will be sent to userspace. + */ + union { + /** + * @event.base: DRM event base class. + */ + struct drm_event base; + + /** + * @event.vbl: + * + * Event payload for vblank events, requested through + * either the MODE_PAGE_FLIP or MODE_ATOMIC IOCTL. Also + * generated by the legacy WAIT_VBLANK IOCTL, but new userspace + * should use MODE_QUEUE_SEQUENCE and &event.seq instead. + */ + struct drm_event_vblank vbl; + + /** + * @event.seq: Event payload for the MODE_QUEUEU_SEQUENCE IOCTL. + */ + struct drm_event_crtc_sequence seq; + } event; +}; + +/** + * struct drm_vblank_crtc - vblank tracking for a CRTC + * + * This structure tracks the vblank state for one CRTC. + * + * Note that for historical reasons - the vblank handling code is still shared + * with legacy/non-kms drivers - this is a free-standing structure not directly + * connected to &struct drm_crtc. But all public interface functions are taking + * a &struct drm_crtc to hide this implementation detail. + */ +struct drm_vblank_crtc { + /** + * @dev: Pointer to the &drm_device. + */ + struct drm_device *dev; + /** + * @queue: Wait queue for vblank waiters. + */ + wait_queue_head_t queue; + /** + * @disable_timer: Disable timer for the delayed vblank disabling + * hysteresis logic. Vblank disabling is controlled through the + * drm_vblank_offdelay module option and the setting of the + * &drm_device.max_vblank_count value. + */ + struct timer_list disable_timer; + + /** + * @seqlock: Protect vblank count and time. + */ + seqlock_t seqlock; + + /** + * @count: + * + * Current software vblank counter. + * + * Note that for a given vblank counter value drm_crtc_handle_vblank() + * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() + * provide a barrier: Any writes done before calling + * drm_crtc_handle_vblank() will be visible to callers of the later + * functions, iff the vblank count is the same or a later one. + * + * IMPORTANT: This guarantee requires barriers, therefor never access + * this field directly. Use drm_crtc_vblank_count() instead. + */ + atomic64_t count; + /** + * @time: Vblank timestamp corresponding to @count. + */ + ktime_t time; + + /** + * @refcount: Number of users/waiters of the vblank interrupt. Only when + * this refcount reaches 0 can the hardware interrupt be disabled using + * @disable_timer. + */ + atomic_t refcount; + /** + * @last: Protected by &drm_device.vbl_lock, used for wraparound handling. + */ + u32 last; + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers for this crtc. This value +1 + * will result in a wrap-around of the vblank register. It is used + * by the vblank core to handle wrap-arounds. 
+ * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. That approach is suffering from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * This is the runtime configurable per-crtc maximum set through + * drm_crtc_set_max_vblank_count(). If this is used the driver + * must leave the device wide &drm_device.max_vblank_count at zero. + * + * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set. + */ + u32 max_vblank_count; + /** + * @inmodeset: Tracks whether the vblank is disabled due to a modeset. + * For legacy driver bit 2 additionally tracks whether an additional + * temporary vblank reference has been acquired to paper over the + * hardware counter resetting/jumping. KMS drivers should instead just + * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly + * save and restore the vblank count. + */ + unsigned int inmodeset; + /** + * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this + * structure. + */ + unsigned int pipe; + /** + * @framedur_ns: Frame/Field duration in ns, used by + * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by + * drm_calc_timestamping_constants(). + */ + int framedur_ns; + /** + * @linedur_ns: Line duration in ns, used by + * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by + * drm_calc_timestamping_constants(). + */ + int linedur_ns; + + /** + * @hwmode: + * + * Cache of the current hardware display mode. Only valid when @enabled + * is set. This is used by helpers like + * drm_crtc_vblank_helper_get_vblank_timestamp(). We can't just access + * the hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode, + * because that one is really hard to get from interrupt context. + */ + struct drm_display_mode hwmode; + + /** + * @enabled: Tracks the enabling state of the corresponding &drm_crtc to + * avoid double-disabling and hence corrupting saved state. Needed by + * drivers not using atomic KMS, since those might go through their CRTC + * disabling functions multiple times. + */ + bool enabled; + + /** + * @worker: The &kthread_worker used for executing vblank works. + */ + struct kthread_worker *worker; + + /** + * @pending_work: A list of scheduled &drm_vblank_work items that are + * waiting for a future vblank. + */ + struct list_head pending_work; + + /** + * @work_wait_queue: The wait queue used for signaling that a + * &drm_vblank_work item has either finished executing, or was + * cancelled. 
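+ *
+ * As a broader usage sketch (hypothetical driver names, purely for
+ * illustration): once drm_vblank_init() has been called at probe time,
+ * the driver's interrupt handler reports every vblank to the core::
+ *
+ *        static irqreturn_t foo_irq_handler(int irq, void *arg)
+ *        {
+ *                struct foo_device *foo = arg;
+ *
+ *                if (foo_irq_is_vblank(foo))
+ *                        drm_crtc_handle_vblank(&foo->crtc);
+ *
+ *                return IRQ_HANDLED;
+ *        }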
+ */ + wait_queue_head_t work_wait_queue; +}; + +int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); +bool drm_dev_has_vblank(const struct drm_device *dev); +u64 drm_crtc_vblank_count(struct drm_crtc *crtc); +u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, + ktime_t *vblanktime); +void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +void drm_vblank_set_event(struct drm_pending_vblank_event *e, + u64 *seq, + ktime_t *now); +bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); +bool drm_crtc_handle_vblank(struct drm_crtc *crtc); +int drm_crtc_vblank_get(struct drm_crtc *crtc); +void drm_crtc_vblank_put(struct drm_crtc *crtc); +void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); +void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); +void drm_crtc_vblank_off(struct drm_crtc *crtc); +void drm_crtc_vblank_reset(struct drm_crtc *crtc); +void drm_crtc_vblank_on(struct drm_crtc *crtc); +u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); +void drm_vblank_restore(struct drm_device *dev, unsigned int pipe); +void drm_crtc_vblank_restore(struct drm_crtc *crtc); + +void drm_calc_timestamping_constants(struct drm_crtc *crtc, + const struct drm_display_mode *mode); +wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc); +void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, + u32 max_vblank_count); + +/* + * Helpers for struct drm_crtc_funcs + */ + +typedef bool (*drm_vblank_get_scanout_position_func)(struct drm_crtc *crtc, + bool in_vblank_irq, + int *vpos, int *hpos, + ktime_t *stime, + ktime_t *etime, + const struct drm_display_mode *mode); + +bool +drm_crtc_vblank_helper_get_vblank_timestamp_internal(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq, + drm_vblank_get_scanout_position_func get_scanout_position); +bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); + +#endif diff --git a/include/drm/drm_vblank_work.h b/include/drm/drm_vblank_work.h new file mode 100644 index 000000000..eb41d0810 --- /dev/null +++ b/include/drm/drm_vblank_work.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef _DRM_VBLANK_WORK_H_ +#define _DRM_VBLANK_WORK_H_ + +#include <linux/kthread.h> + +struct drm_crtc; + +/** + * struct drm_vblank_work - A delayed work item which delays until a target + * vblank passes, and then executes at realtime priority outside of IRQ + * context. + * + * See also: + * drm_vblank_work_schedule() + * drm_vblank_work_init() + * drm_vblank_work_cancel_sync() + * drm_vblank_work_flush() + */ +struct drm_vblank_work { + /** + * @base: The base &kthread_work item which will be executed by + * &drm_vblank_crtc.worker. Drivers should not interact with this + * directly, and instead rely on drm_vblank_work_init() to initialize + * this. + */ + struct kthread_work base; + + /** + * @vblank: A pointer to &drm_vblank_crtc this work item belongs to. + */ + struct drm_vblank_crtc *vblank; + + /** + * @count: The target vblank this work will execute on. Drivers should + * not modify this value directly, and instead use + * drm_vblank_work_schedule() + */ + u64 count; + + /** + * @cancelling: The number of drm_vblank_work_cancel_sync() calls that + * are currently running. A work item cannot be rescheduled until all + * calls have finished. 
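+ *
+ * Typical usage, sketched with hypothetical driver names and assuming
+ * vblank interrupts are enabled on the CRTC: initialize the work once
+ * against a CRTC, then schedule it against a target vblank count::
+ *
+ *        drm_vblank_work_init(&foo->cleanup_work, &foo->crtc, foo_cleanup_fn);
+ *
+ *        drm_vblank_work_schedule(&foo->cleanup_work,
+ *                                 drm_crtc_vblank_count(&foo->crtc) + 1,
+ *                                 true);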
+ */ + int cancelling; + + /** + * @node: The position of this work item in + * &drm_vblank_crtc.pending_work. + */ + struct list_head node; +}; + +/** + * to_drm_vblank_work - Retrieve the respective &drm_vblank_work item from a + * &kthread_work + * @_work: The &kthread_work embedded inside a &drm_vblank_work + */ +#define to_drm_vblank_work(_work) \ + container_of((_work), struct drm_vblank_work, base) + +int drm_vblank_work_schedule(struct drm_vblank_work *work, + u64 count, bool nextonmiss); +void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc, + void (*func)(struct kthread_work *work)); +bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work); +void drm_vblank_work_flush(struct drm_vblank_work *work); + +#endif /* !_DRM_VBLANK_WORK_H_ */ diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h new file mode 100644 index 000000000..76ac5e97a --- /dev/null +++ b/include/drm/drm_vma_manager.h @@ -0,0 +1,246 @@ +#ifndef __DRM_VMA_MANAGER_H__ +#define __DRM_VMA_MANAGER_H__ + +/* + * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <drm/drm_mm.h> +#include <linux/mm.h> +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +/* We make up offsets for buffer objects so we can recognize them at + * mmap time. 
pgoff in mmap is an unsigned long, so we need to make sure + * that the faked up offset will fit + */ +#if BITS_PER_LONG == 64 +#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) +#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256) +#else +#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1) +#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16) +#endif + +struct drm_file; + +struct drm_vma_offset_file { + struct rb_node vm_rb; + struct drm_file *vm_tag; + unsigned long vm_count; +}; + +struct drm_vma_offset_node { + rwlock_t vm_lock; + struct drm_mm_node vm_node; + struct rb_root vm_files; + bool readonly:1; +}; + +struct drm_vma_offset_manager { + rwlock_t vm_lock; + struct drm_mm vm_addr_space_mm; +}; + +void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr, + unsigned long page_offset, unsigned long size); +void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr); + +struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages); +int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node, unsigned long pages); +void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, + struct drm_vma_offset_node *node); + +int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag); +void drm_vma_node_revoke(struct drm_vma_offset_node *node, + struct drm_file *tag); +bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, + struct drm_file *tag); + +/** + * drm_vma_offset_exact_lookup_locked() - Look up node by exact address + * @mgr: Manager object + * @start: Start address (page-based, not byte-based) + * @pages: Size of object (page-based) + * + * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node. + * It only returns the exact object with the given start address. + * + * RETURNS: + * Node at exact start address @start. + */ +static inline struct drm_vma_offset_node * +drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages) +{ + struct drm_vma_offset_node *node; + + node = drm_vma_offset_lookup_locked(mgr, start, pages); + return (node && node->vm_node.start == start) ? node : NULL; +} + +/** + * drm_vma_offset_lock_lookup() - Lock lookup for extended private use + * @mgr: Manager object + * + * Lock VMA manager for extended lookups. Only locked VMA function calls + * are allowed while holding this lock. All other contexts are blocked from VMA + * until the lock is released via drm_vma_offset_unlock_lookup(). + * + * Use this if you need to take a reference to the objects returned by + * drm_vma_offset_lookup_locked() before releasing this lock again. + * + * This lock must not be used for anything else than extended lookups. You must + * not call any other VMA helpers while holding this lock. + * + * Note: You're in atomic-context while holding this lock! + */ +static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr) +{ + read_lock(&mgr->vm_lock); +} + +/** + * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use + * @mgr: Manager object + * + * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information. 
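+ *
+ * A minimal sketch of the extended-lookup pattern (the buffer object type
+ * and its reference counting are hypothetical)::
+ *
+ *        drm_vma_offset_lock_lookup(mgr);
+ *        node = drm_vma_offset_lookup_locked(mgr, start, pages);
+ *        if (node)
+ *                kref_get_unless_zero(&container_of(node, struct foo_bo,
+ *                                                   vma_node)->refcount);
+ *        drm_vma_offset_unlock_lookup(mgr);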
+ */ +static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr) +{ + read_unlock(&mgr->vm_lock); +} + +/** + * drm_vma_node_reset() - Initialize or reset node object + * @node: Node to initialize or reset + * + * Reset a node to its initial state. This must be called before using it with + * any VMA offset manager. + * + * This must not be called on an already allocated node, or you will leak + * memory. + */ +static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) +{ + memset(node, 0, sizeof(*node)); + node->vm_files = RB_ROOT; + rwlock_init(&node->vm_lock); +} + +/** + * drm_vma_node_start() - Return start address for page-based addressing + * @node: Node to inspect + * + * Return the start address of the given node. This can be used as offset into + * the linear VM space that is provided by the VMA offset manager. Note that + * this can only be used for page-based addressing. If you need a proper offset + * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the + * drm_vma_node_offset_addr() helper instead. + * + * RETURNS: + * Start address of @node for page-based addressing. 0 if the node does not + * have an offset allocated. + */ +static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node) +{ + return node->vm_node.start; +} + +/** + * drm_vma_node_size() - Return size (page-based) + * @node: Node to inspect + * + * Return the size as number of pages for the given node. This is the same size + * that was passed to drm_vma_offset_add(). If no offset is allocated for the + * node, this is 0. + * + * RETURNS: + * Size of @node as number of pages. 0 if the node does not have an offset + * allocated. + */ +static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node) +{ + return node->vm_node.size; +} + +/** + * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps + * @node: Linked offset node + * + * Same as drm_vma_node_start() but returns the address as a valid offset that + * can be used for user-space mappings during mmap(). + * This must not be called on unlinked nodes. + * + * RETURNS: + * Offset of @node for byte-based addressing. 0 if the node does not have an + * object allocated. + */ +static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node) +{ + return ((__u64)node->vm_node.start) << PAGE_SHIFT; +} + +/** + * drm_vma_node_unmap() - Unmap offset node + * @node: Offset node + * @file_mapping: Address space to unmap @node from + * + * Unmap all userspace mappings for a given offset node. The mappings must be + * associated with the @file_mapping address-space. If no offset exists + * nothing is done. + * + * This call is unlocked. The caller must guarantee that drm_vma_offset_remove() + * is not called on this node concurrently. + */ +static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, + struct address_space *file_mapping) +{ + if (drm_mm_node_allocated(&node->vm_node)) + unmap_mapping_range(file_mapping, + drm_vma_node_offset_addr(node), + drm_vma_node_size(node) << PAGE_SHIFT, 1); +} + +/** + * drm_vma_node_verify_access() - Access verification helper for TTM + * @node: Offset node + * @tag: Tag of file to check + * + * This checks whether @tag is granted access to @node. It is the same as + * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM + * verify_access() callbacks. + * + * RETURNS: + * 0 if access is granted, -EACCES otherwise. 
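+ *
+ * As a hedged sketch (buffer object and ioctl argument names are assumed),
+ * the usual flow is to publish the node once, hand the resulting offset to
+ * userspace for mmap(), and check access with drm_vma_node_is_allowed() or
+ * this helper at map time, where @mgr is typically
+ * &drm_device.vma_offset_manager::
+ *
+ *        ret = drm_vma_offset_add(mgr, &bo->vma_node, bo->size >> PAGE_SHIFT);
+ *        if (ret)
+ *                return ret;
+ *        args->offset = drm_vma_node_offset_addr(&bo->vma_node);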
+ */ +static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node, + struct drm_file *tag) +{ + return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES; +} + +#endif /* __DRM_VMA_MANAGER_H__ */ diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h new file mode 100644 index 000000000..9697d2714 --- /dev/null +++ b/include/drm/drm_writeback.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Brian Starkey <brian.starkey@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + */ + +#ifndef __DRM_WRITEBACK_H__ +#define __DRM_WRITEBACK_H__ +#include <drm/drm_connector.h> +#include <drm/drm_encoder.h> +#include <linux/workqueue.h> + +/** + * struct drm_writeback_connector - DRM writeback connector + */ +struct drm_writeback_connector { + /** + * @base: base drm_connector object + */ + struct drm_connector base; + + /** + * @encoder: Internal encoder used by the connector to fulfill + * the DRM framework requirements. The users of the + * @drm_writeback_connector control the behaviour of the @encoder + * by passing the @enc_funcs parameter to drm_writeback_connector_init() + * function. + */ + struct drm_encoder encoder; + + /** + * @pixel_formats_blob_ptr: + * + * DRM blob property data for the pixel formats list on writeback + * connectors + * See also drm_writeback_connector_init() + */ + struct drm_property_blob *pixel_formats_blob_ptr; + + /** @job_lock: Protects job_queue */ + spinlock_t job_lock; + + /** + * @job_queue: + * + * Holds a list of a connector's writeback jobs; the last item is the + * most recent. The first item may be either waiting for the hardware + * to begin writing, or currently being written. + * + * See also: drm_writeback_queue_job() and + * drm_writeback_signal_completion() + */ + struct list_head job_queue; + + /** + * @fence_context: + * + * timeline context used for fence operations. + */ + unsigned int fence_context; + /** + * @fence_lock: + * + * spinlock to protect the fences in the fence_context. + */ + spinlock_t fence_lock; + /** + * @fence_seqno: + * + * Seqno variable used as monotonic counter for the fences + * created on the connector's timeline. + */ + unsigned long fence_seqno; + /** + * @timeline_name: + * + * The name of the connector's fence timeline. + */ + char timeline_name[32]; +}; + +/** + * struct drm_writeback_job - DRM writeback job + */ +struct drm_writeback_job { + /** + * @connector: + * + * Back-pointer to the writeback connector associated with the job + */ + struct drm_writeback_connector *connector; + + /** + * @prepared: + * + * Set when the job has been prepared with drm_writeback_prepare_job() + */ + bool prepared; + + /** + * @cleanup_work: + * + * Used to allow drm_writeback_signal_completion to defer dropping the + * framebuffer reference to a workqueue + */ + struct work_struct cleanup_work; + + /** + * @list_entry: + * + * List item for the writeback connector's @job_queue + */ + struct list_head list_entry; + + /** + * @fb: + * + * Framebuffer to be written to by the writeback connector. 
Do not set + * directly, use drm_writeback_set_fb() + */ + struct drm_framebuffer *fb; + + /** + * @out_fence: + * + * Fence which will signal once the writeback has completed + */ + struct dma_fence *out_fence; + + /** + * @priv: + * + * Driver-private data + */ + void *priv; +}; + +static inline struct drm_writeback_connector * +drm_connector_to_writeback(struct drm_connector *connector) +{ + return container_of(connector, struct drm_writeback_connector, base); +} + +int drm_writeback_connector_init(struct drm_device *dev, + struct drm_writeback_connector *wb_connector, + const struct drm_connector_funcs *con_funcs, + const struct drm_encoder_helper_funcs *enc_helper_funcs, + const u32 *formats, int n_formats); + +int drm_writeback_set_fb(struct drm_connector_state *conn_state, + struct drm_framebuffer *fb); + +int drm_writeback_prepare_job(struct drm_writeback_job *job); + +void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector, + struct drm_connector_state *conn_state); + +void drm_writeback_cleanup_job(struct drm_writeback_job *job); + +void +drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector, + int status); + +struct dma_fence * +drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector); +#endif diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h new file mode 100644 index 000000000..228f43e8d --- /dev/null +++ b/include/drm/gma_drm.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/************************************************************************** + * Copyright (c) 2007-2011, Intel Corporation. + * All Rights Reserved. + * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. + * All Rights Reserved. + * + **************************************************************************/ + +#ifndef _GMA_DRM_H_ +#define _GMA_DRM_H_ + +#endif diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h new file mode 100644 index 000000000..92436553f --- /dev/null +++ b/include/drm/gpu_scheduler.h @@ -0,0 +1,350 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+
+#ifndef _DRM_GPU_SCHEDULER_H_
+#define _DRM_GPU_SCHEDULER_H_
+
+#include <drm/spsc_queue.h>
+#include <linux/dma-fence.h>
+#include <linux/completion.h>
+
+#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+
+struct drm_gpu_scheduler;
+struct drm_sched_rq;
+
+/* These are often used as an (initial) index
+ * to an array, and as such should start at 0.
+ */
+enum drm_sched_priority {
+        DRM_SCHED_PRIORITY_MIN,
+        DRM_SCHED_PRIORITY_NORMAL,
+        DRM_SCHED_PRIORITY_HIGH,
+        DRM_SCHED_PRIORITY_KERNEL,
+
+        DRM_SCHED_PRIORITY_COUNT,
+        DRM_SCHED_PRIORITY_UNSET = -2
+};
+
+/**
+ * struct drm_sched_entity - A wrapper around a job queue (typically
+ * attached to the DRM file_priv).
+ *
+ * @list: used to append this struct to the list of entities in the
+ *        runqueue.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @sched_list: A list of schedulers (drm_gpu_schedulers).
+ *              Jobs from this entity can be scheduled on any scheduler
+ *              on this list.
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
+ * @priority: priority of the entity
+ * @rq_lock: lock to modify the runqueue to which this entity belongs.
+ * @job_queue: the list of jobs of this entity.
+ * @fence_seq: a linearly increasing seqno incremented with each
+ *             new &drm_sched_fence which is part of the entity.
+ * @fence_context: a unique context for all the fences which belong
+ *                 to this entity.
+ *                 The &drm_sched_fence.scheduled uses the
+ *                 fence_context but &drm_sched_fence.finished uses
+ *                 fence_context + 1.
+ * @dependency: the dependency fence of the job which is on the top
+ *              of the job queue.
+ * @cb: callback for the dependency fence above.
+ * @guilty: points to ctx's guilty.
+ * @fini_status: contains the exit status in case the process was signalled.
+ * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
+ * @stopped: Marks the entity as removed from rq and destined for termination.
+ * @entity_idle: Signals when the entity is not in use.
+ *
+ * Entities will emit jobs in order to their corresponding hardware
+ * ring, and the scheduler will alternate between entities based on
+ * scheduling policy.
+ */
+struct drm_sched_entity {
+        struct list_head                list;
+        struct drm_sched_rq             *rq;
+        struct drm_gpu_scheduler        **sched_list;
+        unsigned int                    num_sched_list;
+        enum drm_sched_priority         priority;
+        spinlock_t                      rq_lock;
+
+        struct spsc_queue               job_queue;
+
+        atomic_t                        fence_seq;
+        uint64_t                        fence_context;
+
+        struct dma_fence                *dependency;
+        struct dma_fence_cb             cb;
+        atomic_t                        *guilty;
+        struct dma_fence                *last_scheduled;
+        struct task_struct              *last_user;
+        bool                            stopped;
+        struct completion               entity_idle;
+};
+
+/**
+ * struct drm_sched_rq - queue of entities to be scheduled.
+ *
+ * @lock: to modify the entities list.
+ * @sched: the scheduler to which this rq belongs to.
+ * @entities: list of the entities to be scheduled.
+ * @current_entity: the entity which is to be scheduled.
+ *
+ * Run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+ */
+struct drm_sched_rq {
+        spinlock_t                      lock;
+        struct drm_gpu_scheduler        *sched;
+        struct list_head                entities;
+        struct drm_sched_entity         *current_entity;
+};
+
+/**
+ * struct drm_sched_fence - fences corresponding to the scheduling of a job.
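+ *
+ * As a submission sketch (the driver job structure embedding a
+ * &drm_sched_job as @base and the @ctx entity are hypothetical): after
+ * drm_sched_job_init(), &drm_sched_fence.finished is the fence to expose
+ * as the out-fence, and drm_sched_entity_push_job() hands the job to the
+ * scheduler::
+ *
+ *        ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
+ *        if (ret)
+ *                return ret;
+ *
+ *        out_fence = dma_fence_get(&job->base.s_fence->finished);
+ *        drm_sched_entity_push_job(&job->base, &ctx->entity);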
+ */ +struct drm_sched_fence { + /** + * @scheduled: this fence is what will be signaled by the scheduler + * when the job is scheduled. + */ + struct dma_fence scheduled; + + /** + * @finished: this fence is what will be signaled by the scheduler + * when the job is completed. + * + * When setting up an out fence for the job, you should use + * this, since it's available immediately upon + * drm_sched_job_init(), and the fence returned by the driver + * from run_job() won't be created until the dependencies have + * resolved. + */ + struct dma_fence finished; + + /** + * @parent: the fence returned by &drm_sched_backend_ops.run_job + * when scheduling the job on hardware. We signal the + * &drm_sched_fence.finished fence once parent is signalled. + */ + struct dma_fence *parent; + /** + * @sched: the scheduler instance to which the job having this struct + * belongs to. + */ + struct drm_gpu_scheduler *sched; + /** + * @lock: the lock used by the scheduled and the finished fences. + */ + spinlock_t lock; + /** + * @owner: job owner for debugging + */ + void *owner; +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); + +/** + * struct drm_sched_job - A job to be run by an entity. + * + * @queue_node: used to append this struct to the queue of jobs in an entity. + * @sched: the scheduler instance on which this job is scheduled. + * @s_fence: contains the fences for the scheduling of job. + * @finish_cb: the callback for the finished fence. + * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list. + * @id: a unique id assigned to each job scheduled on the scheduler. + * @karma: increment on every hang caused by this job. If this exceeds the hang + * limit of the scheduler then the job is marked guilty and will not + * be scheduled further. + * @s_priority: the priority of the job. + * @entity: the entity to which this job belongs. + * @cb: the callback for the parent fence in s_fence. + * + * A job is created by the driver using drm_sched_job_init(), and + * should call drm_sched_entity_push_job() once it wants the scheduler + * to schedule the job. + */ +struct drm_sched_job { + struct spsc_node queue_node; + struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + struct dma_fence_cb finish_cb; + struct list_head node; + uint64_t id; + atomic_t karma; + enum drm_sched_priority s_priority; + struct drm_sched_entity *entity; + struct dma_fence_cb cb; +}; + +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return (s_job && atomic_inc_return(&s_job->karma) > threshold); +} + +/** + * struct drm_sched_backend_ops + * + * Define the backend operations called by the scheduler, + * these functions should be implemented in driver side. + */ +struct drm_sched_backend_ops { + /** + * @dependency: Called when the scheduler is considering scheduling + * this job next, to get another struct dma_fence for this job to + * block on. Once it returns NULL, run_job() may be called. + */ + struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity); + + /** + * @run_job: Called to execute the job once all of the dependencies + * have been resolved. This may be called multiple times, if + * timedout_job() has happened and drm_sched_job_recovery() + * decides to try it again. + */ + struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); + + /** + * @timedout_job: Called when a job has taken too long to execute, + * to trigger GPU recovery. 
+ */
+        void (*timedout_job)(struct drm_sched_job *sched_job);
+
+        /**
+         * @free_job: Called once the job's finished fence has been signaled
+         * and it's time to clean it up.
+         */
+        void (*free_job)(struct drm_sched_job *sched_job);
+};
+
+/**
+ * struct drm_gpu_scheduler
+ *
+ * @ops: backend operations provided by the driver.
+ * @hw_submission_limit: the max size of the hardware queue.
+ * @timeout: the time after which a job is removed from the scheduler.
+ * @name: name of the ring for which this scheduler is being used.
+ * @sched_rq: priority-wise array of run queues.
+ * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
+ *                  is ready to be scheduled.
+ * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
+ *                 waits on this wait queue until all the scheduled jobs are
+ *                 finished.
+ * @hw_rq_count: the number of jobs currently in the hardware queue.
+ * @job_id_count: used to assign a unique id to each job.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
+ *            timeout interval is over.
+ * @thread: the kthread on which the scheduler runs.
+ * @ring_mirror_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the ring_mirror_list.
+ * @hang_limit: once the hangs caused by a job cross this limit, the job is
+ *              marked guilty and will not be considered for scheduling
+ *              further.
+ * @score: score to help the load balancer pick an idle sched.
+ * @ready: marks if the underlying HW is ready to work.
+ * @free_guilty: A hint to the timeout handler to free the guilty job.
+ *
+ * One scheduler is implemented for each hardware ring.
+ */
+struct drm_gpu_scheduler {
+        const struct drm_sched_backend_ops      *ops;
+        uint32_t                        hw_submission_limit;
+        long                            timeout;
+        const char                      *name;
+        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_COUNT];
+        wait_queue_head_t               wake_up_worker;
+        wait_queue_head_t               job_scheduled;
+        atomic_t                        hw_rq_count;
+        atomic64_t                      job_id_count;
+        struct delayed_work             work_tdr;
+        struct task_struct              *thread;
+        struct list_head                ring_mirror_list;
+        spinlock_t                      job_list_lock;
+        int                             hang_limit;
+        atomic_t                        score;
+        bool                            ready;
+        bool                            free_guilty;
+};
+
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+                   const struct drm_sched_backend_ops *ops,
+                   uint32_t hw_submission, unsigned hang_limit, long timeout,
+                   const char *name);
+
+void drm_sched_fini(struct drm_gpu_scheduler *sched);
+int drm_sched_job_init(struct drm_sched_job *job,
+                       struct drm_sched_entity *entity,
+                       void *owner);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+                                   struct drm_gpu_scheduler **sched_list,
+                                   unsigned int num_sched_list);
+
+void drm_sched_job_cleanup(struct drm_sched_job *job);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_increase_karma(struct drm_sched_job *bad);
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+                                    struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+                             struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+                                struct drm_sched_entity *entity);
+
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+                          enum drm_sched_priority
priority, + struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list, + atomic_t *guilty); +long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); +void drm_sched_entity_fini(struct drm_sched_entity *entity); +void drm_sched_entity_destroy(struct drm_sched_entity *entity); +void drm_sched_entity_select_rq(struct drm_sched_entity *entity); +struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity); +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority); +bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); + +struct drm_sched_fence *drm_sched_fence_create( + struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_scheduled(struct drm_sched_fence *fence); +void drm_sched_fence_finished(struct drm_sched_fence *fence); + +unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched); +void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, + unsigned long remaining); +struct drm_gpu_scheduler * +drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list); + +#endif diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h new file mode 100644 index 000000000..8390b437a --- /dev/null +++ b/include/drm/i2c/ch7006.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_I2C_CH7006_H__ +#define __DRM_I2C_CH7006_H__ + +/** + * struct ch7006_encoder_params + * + * Describes how the ch7006 is wired up with the GPU. It should be + * used as the @params parameter of its @set_config method. + * + * See "http://www.chrontel.com/pdf/7006.pdf" for their precise + * meaning. 
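The init and submission entry points declared above are normally used in a fixed order: drm_sched_init() once per ring, drm_sched_entity_init() once per context, then drm_sched_job_init() followed by drm_sched_entity_push_job() per submission. A hedged sketch, reusing the invented my_sched_ops/my_job names from the earlier sketch and an invented my_dev with a single ring:

struct my_dev {
	struct drm_gpu_scheduler sched;
	struct drm_sched_entity ctx_entity;
};

static int my_sched_setup(struct my_dev *mdev)
{
	struct drm_gpu_scheduler *sched_list[] = { &mdev->sched };
	int ret;

	ret = drm_sched_init(&mdev->sched, &my_sched_ops,
			     64,			/* hw_submission_limit */
			     3,				/* hang_limit */
			     msecs_to_jiffies(10000),	/* timeout */
			     "my-ring");
	if (ret)
		return ret;

	/* DRM_SCHED_PRIORITY_NORMAL comes from the priority enum earlier in
	 * this header; one entity per userspace context is typical. */
	return drm_sched_entity_init(&mdev->ctx_entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, ARRAY_SIZE(sched_list), NULL);
}

static int my_submit(struct my_dev *mdev, struct my_job *job)
{
	int ret;

	ret = drm_sched_job_init(&job->base, &mdev->ctx_entity, mdev);
	if (ret)
		return ret;

	/* job->base.s_fence->finished is the fence to hand back to userspace. */
	drm_sched_entity_push_job(&job->base, &mdev->ctx_entity);
	return 0;
}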
+ */ +struct ch7006_encoder_params { + enum { + CH7006_FORMAT_RGB16 = 0, + CH7006_FORMAT_YCrCb24m16, + CH7006_FORMAT_RGB24m16, + CH7006_FORMAT_RGB15, + CH7006_FORMAT_RGB24m12C, + CH7006_FORMAT_RGB24m12I, + CH7006_FORMAT_RGB24m8, + CH7006_FORMAT_RGB16m8, + CH7006_FORMAT_RGB15m8, + CH7006_FORMAT_YCrCb24m8, + } input_format; + + enum { + CH7006_CLOCK_SLAVE = 0, + CH7006_CLOCK_MASTER, + } clock_mode; + + enum { + CH7006_CLOCK_EDGE_NEG = 0, + CH7006_CLOCK_EDGE_POS, + } clock_edge; + + int xcm, pcm; + + enum { + CH7006_SYNC_SLAVE = 0, + CH7006_SYNC_MASTER, + } sync_direction; + + enum { + CH7006_SYNC_SEPARATED = 0, + CH7006_SYNC_EMBEDDED, + } sync_encoding; + + enum { + CH7006_POUT_1_8V = 0, + CH7006_POUT_3_3V, + } pout_level; + + enum { + CH7006_ACTIVE_HSYNC = 0, + CH7006_ACTIVE_DSTART, + } active_detect; +}; + +#endif diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h new file mode 100644 index 000000000..205e27384 --- /dev/null +++ b/include/drm/i2c/sil164.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DRM_I2C_SIL164_H__ +#define __DRM_I2C_SIL164_H__ + +/** + * struct sil164_encoder_params + * + * Describes how the sil164 is connected to the GPU. It should be used + * as the @params parameter of its @set_config method. + * + * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf". + */ +struct sil164_encoder_params { + enum { + SIL164_INPUT_EDGE_FALLING = 0, + SIL164_INPUT_EDGE_RISING + } input_edge; + + enum { + SIL164_INPUT_WIDTH_12BIT = 0, + SIL164_INPUT_WIDTH_24BIT + } input_width; + + enum { + SIL164_INPUT_SINGLE_EDGE = 0, + SIL164_INPUT_DUAL_EDGE + } input_dual; + + enum { + SIL164_PLL_FILTER_ON = 0, + SIL164_PLL_FILTER_OFF, + } pll_filter; + + int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */ + int duallink_skew; /** < Allowed range [-4, 3]. 
*/ +}; + +#endif diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h new file mode 100644 index 000000000..3cb25ccbe --- /dev/null +++ b/include/drm/i2c/tda998x.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_I2C_TDA998X_H__ +#define __DRM_I2C_TDA998X_H__ + +#include <linux/hdmi.h> +#include <dt-bindings/display/tda998x.h> + +enum { + AFMT_UNUSED = 0, + AFMT_SPDIF = TDA998x_SPDIF, + AFMT_I2S = TDA998x_I2S, +}; + +struct tda998x_audio_params { + u8 config; + u8 format; + unsigned sample_width; + unsigned sample_rate; + struct hdmi_audio_infoframe cea; + u8 status[5]; +}; + +struct tda998x_encoder_params { + u8 swap_b:3; + u8 mirr_b:1; + u8 swap_a:3; + u8 mirr_a:1; + u8 swap_d:3; + u8 mirr_d:1; + u8 swap_c:3; + u8 mirr_c:1; + u8 swap_f:3; + u8 mirr_f:1; + u8 swap_e:3; + u8 mirr_e:1; + + struct tda998x_audio_params audio_params; +}; + +#endif diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h new file mode 100644 index 000000000..55c3b1235 --- /dev/null +++ b/include/drm/i915_component.h @@ -0,0 +1,54 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _I915_COMPONENT_H_ +#define _I915_COMPONENT_H_ + +#include "drm_audio_component.h" + +enum i915_component_type { + I915_COMPONENT_AUDIO = 1, + I915_COMPONENT_HDCP, +}; + +/* MAX_PORT is the number of port + * It must be sync with I915_MAX_PORTS defined i915_drv.h + */ +#define MAX_PORTS 9 + +/** + * struct i915_audio_component - Used for direct communication between i915 and hda drivers + */ +struct i915_audio_component { + /** + * @base: the drm_audio_component base class + */ + struct drm_audio_component base; + + /** + * @aud_sample_rate: the array of audio sample rate per port + */ + int aud_sample_rate[MAX_PORTS]; +}; + +#endif /* _I915_COMPONENT_H_ */ diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h new file mode 100644 index 000000000..672200588 --- /dev/null +++ b/include/drm/i915_drm.h @@ -0,0 +1,103 @@ +/* + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. 
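The sil164 parameter block that closes above, like the ch7006 one before it, is simply filled in by board-specific code and handed to the slave encoder's set_config hook. A hedged example of a static configuration; the chosen values are illustrative only:

static const struct sil164_encoder_params my_sil164_params = {
	.input_edge	= SIL164_INPUT_EDGE_RISING,
	.input_width	= SIL164_INPUT_WIDTH_24BIT,
	.input_dual	= SIL164_INPUT_SINGLE_EDGE,
	.pll_filter	= SIL164_PLL_FILTER_ON,
	.input_skew	= 0,	/* no de-skew */
	.duallink_skew	= 0,
};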
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+#include <drm/i915_pciids.h>
+#include <uapi/drm/i915_drm.h>
+
+/* For use by IPS driver */
+unsigned long i915_read_mch_val(void);
+bool i915_gpu_raise(void);
+bool i915_gpu_lower(void);
+bool i915_gpu_busy(void);
+bool i915_gpu_turbo_disable(void);
+
+/* Exported from arch/x86/kernel/early-quirks.c */
+extern struct resource intel_graphics_stolen_res;
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
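As a hedged illustration of how the SNB_GMCH_* fields defined just below are typically extracted (the mapping of the raw encodings to byte sizes is generation specific and not shown); the bridge argument is assumed to be the host bridge PCI device:

#include <linux/pci.h>

static void snb_read_gmch_fields(struct pci_dev *bridge, u16 *ggms, u16 *gms)
{
	u16 gmch_ctrl;

	pci_read_config_word(bridge, SNB_GMCH_CTRL, &gmch_ctrl);
	*ggms = (gmch_ctrl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;	/* GTT size */
	*gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;		/* stolen memory */
}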
+ */ +#define INTEL_GMCH_CTRL 0x52 +#define INTEL_GMCH_VGA_DISABLE (1 << 1) +#define SNB_GMCH_CTRL 0x50 +#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ +#define SNB_GMCH_GGMS_MASK 0x3 +#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ +#define SNB_GMCH_GMS_MASK 0x1f +#define BDW_GMCH_GGMS_SHIFT 6 +#define BDW_GMCH_GGMS_MASK 0x3 +#define BDW_GMCH_GMS_SHIFT 8 +#define BDW_GMCH_GMS_MASK 0xff + +#define I830_GMCH_CTRL 0x52 + +#define I830_GMCH_GMS_MASK 0x70 +#define I830_GMCH_GMS_LOCAL 0x10 +#define I830_GMCH_GMS_STOLEN_512 0x20 +#define I830_GMCH_GMS_STOLEN_1024 0x30 +#define I830_GMCH_GMS_STOLEN_8192 0x40 + +#define I855_GMCH_GMS_MASK 0xF0 +#define I855_GMCH_GMS_STOLEN_0M 0x0 +#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) +#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) +#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) +#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) +#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) +#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) +#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) +#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) +#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) +#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) +#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) +#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) +#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) + +#define I830_DRB3 0x63 +#define I85X_DRB3 0x43 +#define I865_TOUD 0xc4 + +#define I830_ESMRAMC 0x91 +#define I845_ESMRAMC 0x9e +#define I85X_ESMRAMC 0x61 +#define TSEG_ENABLE (1 << 0) +#define I830_TSEG_SIZE_512K (0 << 1) +#define I830_TSEG_SIZE_1M (1 << 1) +#define I845_TSEG_SIZE_MASK (3 << 1) +#define I845_TSEG_SIZE_512K (2 << 1) +#define I845_TSEG_SIZE_1M (3 << 1) + +#define INTEL_BSM 0x5c +#define INTEL_GEN11_BSM_DW0 0xc0 +#define INTEL_GEN11_BSM_DW1 0xc4 +#define INTEL_BSM_MASK (-(1u << 20)) + +#endif /* _I915_DRM_H_ */ diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h new file mode 100644 index 000000000..702f61324 --- /dev/null +++ b/include/drm/i915_mei_hdcp_interface.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: (GPL-2.0+) */ +/* + * Copyright © 2017-2019 Intel Corporation + * + * Authors: + * Ramalingam C <ramalingam.c@intel.com> + */ + +#ifndef _I915_MEI_HDCP_INTERFACE_H_ +#define _I915_MEI_HDCP_INTERFACE_H_ + +#include <linux/mutex.h> +#include <linux/device.h> +#include <drm/drm_hdcp.h> + +/** + * enum hdcp_port_type - HDCP port implementation type defined by ME FW + * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type + * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port + * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON + * (HDMI 2.0) solution + * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3) + * solution + */ +enum hdcp_port_type { + HDCP_PORT_TYPE_INVALID, + HDCP_PORT_TYPE_INTEGRATED, + HDCP_PORT_TYPE_LSPCON, + HDCP_PORT_TYPE_CPDP +}; + +/** + * enum hdcp_wired_protocol - HDCP adaptation used on the port + * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol + * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port + * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port + */ +enum hdcp_wired_protocol { + HDCP_PROTOCOL_INVALID, + HDCP_PROTOCOL_HDMI, + HDCP_PROTOCOL_DP +}; + +enum mei_fw_ddi { + MEI_DDI_INVALID_PORT = 0x0, + + MEI_DDI_B = 1, + MEI_DDI_C, + MEI_DDI_D, + MEI_DDI_E, + MEI_DDI_F, + MEI_DDI_A = 7, + MEI_DDI_RANGE_END = MEI_DDI_A, +}; + +/** + * enum mei_fw_tc - ME Firmware defined index for transcoders + * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder + * @MEI_TRANSCODER_EDP: 
Index for EDP Transcoder + * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder + * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder + * @MEI_TRANSCODER_A: Index for Transcoder A + * @MEI_TRANSCODER_B: Index for Transcoder B + * @MEI_TRANSCODER_C: Index for Transcoder C + * @MEI_TRANSCODER_D: Index for Transcoder D + */ +enum mei_fw_tc { + MEI_INVALID_TRANSCODER = 0x00, + MEI_TRANSCODER_EDP, + MEI_TRANSCODER_DSI0, + MEI_TRANSCODER_DSI1, + MEI_TRANSCODER_A = 0x10, + MEI_TRANSCODER_B, + MEI_TRANSCODER_C, + MEI_TRANSCODER_D +}; + +/** + * struct hdcp_port_data - intel specific HDCP port data + * @fw_ddi: ddi index as per ME FW + * @fw_tc: transcoder index as per ME FW + * @port_type: HDCP port type as per ME FW classification + * @protocol: HDCP adaptation as per ME FW + * @k: No of streams transmitted on a port. Only on DP MST this is != 1 + * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated. + * Initialized to 0 on AKE_INIT. Incremented after every successful + * transmission of RepeaterAuth_Stream_Manage message. When it rolls + * over re-Auth has to be triggered. + * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the + * streams + */ +struct hdcp_port_data { + enum mei_fw_ddi fw_ddi; + enum mei_fw_tc fw_tc; + u8 port_type; + u8 protocol; + u16 k; + u32 seq_num_m; + struct hdcp2_streamid_type *streams; +}; + +/** + * struct i915_hdcp_component_ops- ops for HDCP2.2 services. + * @owner: Module providing the ops + * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session. + * And Prepare AKE_Init. + * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate + * AKE_Send_Cert and prepare + AKE_Stored_Km/AKE_No_Stored_Km + * @verify_hprime: Verify AKE_Send_H_prime + * @store_pairing_info: Store pairing info received + * @initiate_locality_check: Prepare LC_Init + * @verify_lprime: Verify lprime + * @get_session_key: Prepare SKE_Send_Eks + * @repeater_check_flow_prepare_ack: Validate the Downstream topology + * and prepare rep_ack + * @verify_mprime: Verify mprime + * @enable_hdcp_authentication: Mark a port as authenticated. + * @close_hdcp_session: Close the Wired HDCP Tx session per port. + * This also disables the authenticated state of the port. 
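A hedged sketch of how the display-driver side might drive one step of this flow through the ops table and the component master structure defined just below; error handling is reduced to the unbound case:

/* Kick off an HDCP2.2 session for one port; 'master' is the bound
 * i915_hdcp_comp_master and 'data' the per-port state. */
static int my_hdcp2_ake_init(struct i915_hdcp_comp_master *master,
			     struct hdcp_port_data *data,
			     struct hdcp2_ake_init *ake_data)
{
	int ret;

	mutex_lock(&master->mutex);
	if (!master->ops || !master->mei_dev)
		ret = -ENODEV;	/* mei_hdcp component not bound (yet) */
	else
		ret = master->ops->initiate_hdcp2_session(master->mei_dev,
							  data, ake_data);
	mutex_unlock(&master->mutex);

	return ret;
}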
+ */ +struct i915_hdcp_component_ops { + /** + * @owner: mei_hdcp module + */ + struct module *owner; + + int (*initiate_hdcp2_session)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data); + int (*verify_receiver_cert_prepare_km)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert + *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz); + int (*verify_hprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime); + int (*store_pairing_info)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info + *pairing_info); + int (*initiate_locality_check)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data); + int (*verify_lprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime *rx_lprime); + int (*get_session_key)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data); + int (*repeater_check_flow_prepare_ack)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack); + int (*verify_mprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready); + int (*enable_hdcp_authentication)(struct device *dev, + struct hdcp_port_data *data); + int (*close_hdcp_session)(struct device *dev, + struct hdcp_port_data *data); +}; + +/** + * struct i915_hdcp_component_master - Used for communication between i915 + * and mei_hdcp drivers for the HDCP2.2 services + * @mei_dev: device that provide the HDCP2.2 service from MEI Bus. + * @hdcp_ops: Ops implemented by mei_hdcp driver, used by i915 driver. + */ +struct i915_hdcp_comp_master { + struct device *mei_dev; + const struct i915_hdcp_component_ops *ops; + + /* To protect the above members. */ + struct mutex mutex; +}; + +#endif /* _I915_MEI_HDCP_INTERFACE_H_ */ diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h new file mode 100644 index 000000000..7eeecb07c --- /dev/null +++ b/include/drm/i915_pciids.h @@ -0,0 +1,629 @@ +/* + * Copyright 2013 Intel Corporation + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
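The ID-list macros in the i915_pciids.h header beginning here are meant to be expanded directly inside a driver's PCI match table. A hedged sketch; my_platform_info and the chosen platforms are placeholders for whatever the driver stores in driver_data:

#include <linux/module.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>

struct my_platform_info { int gen; };	/* stand-in for the real device info */

static const struct my_platform_info skl_info = { .gen = 9 };
static const struct my_platform_info kbl_info = { .gen = 9 };

static const struct pci_device_id my_pciidlist[] = {
	INTEL_SKL_IDS(&skl_info),
	INTEL_KBL_IDS(&kbl_info),
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, my_pciidlist);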
+ */ +#ifndef _I915_PCIIDS_H +#define _I915_PCIIDS_H + +/* + * A pci_device_id struct { + * __u32 vendor, device; + * __u32 subvendor, subdevice; + * __u32 class, class_mask; + * kernel_ulong_t driver_data; + * }; + * Don't use C99 here because "class" is reserved and we want to + * give userspace flexibility. + */ +#define INTEL_VGA_DEVICE(id, info) { \ + 0x8086, id, \ + ~0, ~0, \ + 0x030000, 0xff0000, \ + (unsigned long) info } + +#define INTEL_QUANTA_VGA_DEVICE(info) { \ + 0x8086, 0x16a, \ + 0x152d, 0x8990, \ + 0x030000, 0xff0000, \ + (unsigned long) info } + +#define INTEL_I810_IDS(info) \ + INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \ + INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \ + INTEL_VGA_DEVICE(0x7125, info) /* I810_E */ + +#define INTEL_I815_IDS(info) \ + INTEL_VGA_DEVICE(0x1132, info) /* I815*/ + +#define INTEL_I830_IDS(info) \ + INTEL_VGA_DEVICE(0x3577, info) + +#define INTEL_I845G_IDS(info) \ + INTEL_VGA_DEVICE(0x2562, info) + +#define INTEL_I85X_IDS(info) \ + INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \ + INTEL_VGA_DEVICE(0x358e, info) + +#define INTEL_I865G_IDS(info) \ + INTEL_VGA_DEVICE(0x2572, info) /* I865_G */ + +#define INTEL_I915G_IDS(info) \ + INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \ + INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */ + +#define INTEL_I915GM_IDS(info) \ + INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */ + +#define INTEL_I945G_IDS(info) \ + INTEL_VGA_DEVICE(0x2772, info) /* I945_G */ + +#define INTEL_I945GM_IDS(info) \ + INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \ + INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */ + +#define INTEL_I965G_IDS(info) \ + INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \ + INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \ + INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \ + INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */ + +#define INTEL_G33_IDS(info) \ + INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \ + INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \ + INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */ + +#define INTEL_I965GM_IDS(info) \ + INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \ + INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */ + +#define INTEL_GM45_IDS(info) \ + INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */ + +#define INTEL_G45_IDS(info) \ + INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \ + INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \ + INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \ + INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \ + INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \ + INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */ + +#define INTEL_PINEVIEW_G_IDS(info) \ + INTEL_VGA_DEVICE(0xa001, info) + +#define INTEL_PINEVIEW_M_IDS(info) \ + INTEL_VGA_DEVICE(0xa011, info) + +#define INTEL_IRONLAKE_D_IDS(info) \ + INTEL_VGA_DEVICE(0x0042, info) + +#define INTEL_IRONLAKE_M_IDS(info) \ + INTEL_VGA_DEVICE(0x0046, info) + +#define INTEL_SNB_D_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0102, info), \ + INTEL_VGA_DEVICE(0x010A, info) + +#define INTEL_SNB_D_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0112, info), \ + INTEL_VGA_DEVICE(0x0122, info) + +#define INTEL_SNB_D_IDS(info) \ + INTEL_SNB_D_GT1_IDS(info), \ + INTEL_SNB_D_GT2_IDS(info) + +#define INTEL_SNB_M_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0106, info) + +#define INTEL_SNB_M_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0116, info), \ + INTEL_VGA_DEVICE(0x0126, info) + +#define INTEL_SNB_M_IDS(info) \ + INTEL_SNB_M_GT1_IDS(info), \ + INTEL_SNB_M_GT2_IDS(info) + +#define INTEL_IVB_M_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */ + +#define INTEL_IVB_M_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0166, 
info) /* GT2 mobile */ + +#define INTEL_IVB_M_IDS(info) \ + INTEL_IVB_M_GT1_IDS(info), \ + INTEL_IVB_M_GT2_IDS(info) + +#define INTEL_IVB_D_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */ + +#define INTEL_IVB_D_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \ + INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ + +#define INTEL_IVB_D_IDS(info) \ + INTEL_IVB_D_GT1_IDS(info), \ + INTEL_IVB_D_GT2_IDS(info) + +#define INTEL_IVB_Q_IDS(info) \ + INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */ + +#define INTEL_HSW_ULT_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ + INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0A06, info) /* ULT GT1 mobile */ + +#define INTEL_HSW_ULX_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */ + +#define INTEL_HSW_GT1_IDS(info) \ + INTEL_HSW_ULT_GT1_IDS(info), \ + INTEL_HSW_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \ + INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ + INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ + INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ + INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \ + INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \ + INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */ + +#define INTEL_HSW_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ + INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0A16, info) /* ULT GT2 mobile */ + +#define INTEL_HSW_ULX_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \ + +#define INTEL_HSW_GT2_IDS(info) \ + INTEL_HSW_ULT_GT2_IDS(info), \ + INTEL_HSW_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ + INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \ + INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ + INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ + INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */ + +#define INTEL_HSW_ULT_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ + INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ + 
INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */ + +#define INTEL_HSW_GT3_IDS(info) \ + INTEL_HSW_ULT_GT3_IDS(info), \ + INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ + INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \ + INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ + INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \ + INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */ + +#define INTEL_HSW_IDS(info) \ + INTEL_HSW_GT1_IDS(info), \ + INTEL_HSW_GT2_IDS(info), \ + INTEL_HSW_GT3_IDS(info) + +#define INTEL_VLV_IDS(info) \ + INTEL_VGA_DEVICE(0x0f30, info), \ + INTEL_VGA_DEVICE(0x0f31, info), \ + INTEL_VGA_DEVICE(0x0f32, info), \ + INTEL_VGA_DEVICE(0x0f33, info) + +#define INTEL_BDW_ULT_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */ + +#define INTEL_BDW_ULX_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */ + +#define INTEL_BDW_GT1_IDS(info) \ + INTEL_BDW_ULT_GT1_IDS(info), \ + INTEL_BDW_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ + INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */ + +#define INTEL_BDW_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \ + INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */ + +#define INTEL_BDW_ULX_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */ + +#define INTEL_BDW_GT2_IDS(info) \ + INTEL_BDW_ULT_GT2_IDS(info), \ + INTEL_BDW_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ + INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \ + INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */ + +#define INTEL_BDW_ULT_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x162B, info) /* Iris */ \ + +#define INTEL_BDW_ULX_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x162E, info) /* ULX */ + +#define INTEL_BDW_GT3_IDS(info) \ + INTEL_BDW_ULT_GT3_IDS(info), \ + INTEL_BDW_ULX_GT3_IDS(info), \ + INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ + INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ + +#define INTEL_BDW_ULT_RSVD_IDS(info) \ + INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x163B, info) /* Iris */ + +#define INTEL_BDW_ULX_RSVD_IDS(info) \ + INTEL_VGA_DEVICE(0x163E, info) /* ULX */ + +#define INTEL_BDW_RSVD_IDS(info) \ + INTEL_BDW_ULT_RSVD_IDS(info), \ + INTEL_BDW_ULX_RSVD_IDS(info), \ + INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ + INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ + +#define INTEL_BDW_IDS(info) \ + INTEL_BDW_GT1_IDS(info), \ + INTEL_BDW_GT2_IDS(info), \ + INTEL_BDW_GT3_IDS(info), \ + INTEL_BDW_RSVD_IDS(info) + +#define INTEL_CHV_IDS(info) \ + INTEL_VGA_DEVICE(0x22b0, info), \ + INTEL_VGA_DEVICE(0x22b1, info), \ + INTEL_VGA_DEVICE(0x22b2, info), \ + INTEL_VGA_DEVICE(0x22b3, info) + +#define INTEL_SKL_ULT_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x1906, info) /* ULT GT1 */ + +#define 
INTEL_SKL_ULX_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x190E, info) /* ULX GT1 */ + +#define INTEL_SKL_GT1_IDS(info) \ + INTEL_SKL_ULT_GT1_IDS(info), \ + INTEL_SKL_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */ + +#define INTEL_SKL_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */ + +#define INTEL_SKL_ULX_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */ + +#define INTEL_SKL_GT2_IDS(info) \ + INTEL_SKL_ULT_GT2_IDS(info), \ + INTEL_SKL_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ + +#define INTEL_SKL_ULT_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x1926, info) /* ULT GT3 */ + +#define INTEL_SKL_GT3_IDS(info) \ + INTEL_SKL_ULT_GT3_IDS(info), \ + INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */ + +#define INTEL_SKL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ + INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \ + INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */ + +#define INTEL_SKL_IDS(info) \ + INTEL_SKL_GT1_IDS(info), \ + INTEL_SKL_GT2_IDS(info), \ + INTEL_SKL_GT3_IDS(info), \ + INTEL_SKL_GT4_IDS(info) + +#define INTEL_BXT_IDS(info) \ + INTEL_VGA_DEVICE(0x0A84, info), \ + INTEL_VGA_DEVICE(0x1A84, info), \ + INTEL_VGA_DEVICE(0x1A85, info), \ + INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ + INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ + +#define INTEL_GLK_IDS(info) \ + INTEL_VGA_DEVICE(0x3184, info), \ + INTEL_VGA_DEVICE(0x3185, info) + +#define INTEL_KBL_ULT_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */ + +#define INTEL_KBL_ULX_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */ + +#define INTEL_KBL_GT1_IDS(info) \ + INTEL_KBL_ULT_GT1_IDS(info), \ + INTEL_KBL_ULX_GT1_IDS(info), \ + INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ + +#define INTEL_KBL_ULT_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */ + +#define INTEL_KBL_ULX_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */ + +#define INTEL_KBL_GT2_IDS(info) \ + INTEL_KBL_ULT_GT2_IDS(info), \ + INTEL_KBL_ULX_GT2_IDS(info), \ + INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \ + INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ + +#define INTEL_KBL_ULT_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */ + +#define INTEL_KBL_GT3_IDS(info) \ + INTEL_KBL_ULT_GT3_IDS(info), \ + INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */ + +#define INTEL_KBL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */ + +/* AML/KBL Y GT2 */ +#define 
INTEL_AML_KBL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */ + +/* AML/CFL Y GT2 */ +#define INTEL_AML_CFL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x87CA, info) + +/* CML GT1 */ +#define INTEL_CML_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x9BA5, info), \ + INTEL_VGA_DEVICE(0x9BA8, info), \ + INTEL_VGA_DEVICE(0x9BA4, info), \ + INTEL_VGA_DEVICE(0x9BA2, info) + +#define INTEL_CML_U_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x9B21, info), \ + INTEL_VGA_DEVICE(0x9BAA, info), \ + INTEL_VGA_DEVICE(0x9BAC, info) + +/* CML GT2 */ +#define INTEL_CML_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x9BC5, info), \ + INTEL_VGA_DEVICE(0x9BC8, info), \ + INTEL_VGA_DEVICE(0x9BC4, info), \ + INTEL_VGA_DEVICE(0x9BC2, info), \ + INTEL_VGA_DEVICE(0x9BC6, info), \ + INTEL_VGA_DEVICE(0x9BE6, info), \ + INTEL_VGA_DEVICE(0x9BF6, info) + +#define INTEL_CML_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x9B41, info), \ + INTEL_VGA_DEVICE(0x9BCA, info), \ + INTEL_VGA_DEVICE(0x9BCC, info) + +#define INTEL_KBL_IDS(info) \ + INTEL_KBL_GT1_IDS(info), \ + INTEL_KBL_GT2_IDS(info), \ + INTEL_KBL_GT3_IDS(info), \ + INTEL_KBL_GT4_IDS(info), \ + INTEL_AML_KBL_GT2_IDS(info) + +/* CFL S */ +#define INTEL_CFL_S_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */ + +#define INTEL_CFL_S_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ + +/* CFL H */ +#define INTEL_CFL_H_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3E9C, info) + +#define INTEL_CFL_H_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */ + +/* CFL U GT2 */ +#define INTEL_CFL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA9, info) + +/* CFL U GT3 */ +#define INTEL_CFL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ + +/* WHL/CFL U GT1 */ +#define INTEL_WHL_U_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA1, info), \ + INTEL_VGA_DEVICE(0x3EA4, info) + +/* WHL/CFL U GT2 */ +#define INTEL_WHL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA0, info), \ + INTEL_VGA_DEVICE(0x3EA3, info) + +/* WHL/CFL U GT3 */ +#define INTEL_WHL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA2, info) + +#define INTEL_CFL_IDS(info) \ + INTEL_CFL_S_GT1_IDS(info), \ + INTEL_CFL_S_GT2_IDS(info), \ + INTEL_CFL_H_GT1_IDS(info), \ + INTEL_CFL_H_GT2_IDS(info), \ + INTEL_CFL_U_GT2_IDS(info), \ + INTEL_CFL_U_GT3_IDS(info), \ + INTEL_WHL_U_GT1_IDS(info), \ + INTEL_WHL_U_GT2_IDS(info), \ + INTEL_WHL_U_GT3_IDS(info), \ + INTEL_AML_CFL_GT2_IDS(info), \ + INTEL_CML_GT1_IDS(info), \ + INTEL_CML_GT2_IDS(info), \ + INTEL_CML_U_GT1_IDS(info), \ + INTEL_CML_U_GT2_IDS(info) + +/* CNL */ +#define INTEL_CNL_PORT_F_IDS(info) \ + INTEL_VGA_DEVICE(0x5A54, info), \ + INTEL_VGA_DEVICE(0x5A5C, info), \ + INTEL_VGA_DEVICE(0x5A44, info), \ + INTEL_VGA_DEVICE(0x5A4C, info) + +#define INTEL_CNL_IDS(info) \ + INTEL_CNL_PORT_F_IDS(info), \ + INTEL_VGA_DEVICE(0x5A51, info), \ + INTEL_VGA_DEVICE(0x5A59, info), \ + INTEL_VGA_DEVICE(0x5A41, info), \ + INTEL_VGA_DEVICE(0x5A49, info), \ + INTEL_VGA_DEVICE(0x5A52, info), \ + INTEL_VGA_DEVICE(0x5A5A, info), \ + INTEL_VGA_DEVICE(0x5A42, info), \ 
+ INTEL_VGA_DEVICE(0x5A4A, info), \ + INTEL_VGA_DEVICE(0x5A50, info), \ + INTEL_VGA_DEVICE(0x5A40, info) + +/* ICL */ +#define INTEL_ICL_PORT_F_IDS(info) \ + INTEL_VGA_DEVICE(0x8A50, info), \ + INTEL_VGA_DEVICE(0x8A5C, info), \ + INTEL_VGA_DEVICE(0x8A59, info), \ + INTEL_VGA_DEVICE(0x8A58, info), \ + INTEL_VGA_DEVICE(0x8A52, info), \ + INTEL_VGA_DEVICE(0x8A5A, info), \ + INTEL_VGA_DEVICE(0x8A5B, info), \ + INTEL_VGA_DEVICE(0x8A57, info), \ + INTEL_VGA_DEVICE(0x8A56, info), \ + INTEL_VGA_DEVICE(0x8A71, info), \ + INTEL_VGA_DEVICE(0x8A70, info), \ + INTEL_VGA_DEVICE(0x8A53, info), \ + INTEL_VGA_DEVICE(0x8A54, info) + +#define INTEL_ICL_11_IDS(info) \ + INTEL_ICL_PORT_F_IDS(info), \ + INTEL_VGA_DEVICE(0x8A51, info), \ + INTEL_VGA_DEVICE(0x8A5D, info) + +/* EHL/JSL */ +#define INTEL_EHL_IDS(info) \ + INTEL_VGA_DEVICE(0x4500, info), \ + INTEL_VGA_DEVICE(0x4571, info), \ + INTEL_VGA_DEVICE(0x4551, info), \ + INTEL_VGA_DEVICE(0x4541, info), \ + INTEL_VGA_DEVICE(0x4E71, info), \ + INTEL_VGA_DEVICE(0x4557, info), \ + INTEL_VGA_DEVICE(0x4555, info), \ + INTEL_VGA_DEVICE(0x4E61, info), \ + INTEL_VGA_DEVICE(0x4E57, info), \ + INTEL_VGA_DEVICE(0x4E55, info), \ + INTEL_VGA_DEVICE(0x4E51, info) + +/* TGL */ +#define INTEL_TGL_12_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x9A60, info), \ + INTEL_VGA_DEVICE(0x9A68, info), \ + INTEL_VGA_DEVICE(0x9A70, info) + +#define INTEL_TGL_12_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x9A40, info), \ + INTEL_VGA_DEVICE(0x9A49, info), \ + INTEL_VGA_DEVICE(0x9A59, info), \ + INTEL_VGA_DEVICE(0x9A78, info), \ + INTEL_VGA_DEVICE(0x9AC0, info), \ + INTEL_VGA_DEVICE(0x9AC9, info), \ + INTEL_VGA_DEVICE(0x9AD9, info), \ + INTEL_VGA_DEVICE(0x9AF8, info) + +#define INTEL_TGL_12_IDS(info) \ + INTEL_TGL_12_GT1_IDS(info), \ + INTEL_TGL_12_GT2_IDS(info) + +/* RKL */ +#define INTEL_RKL_IDS(info) \ + INTEL_VGA_DEVICE(0x4C80, info), \ + INTEL_VGA_DEVICE(0x4C8A, info), \ + INTEL_VGA_DEVICE(0x4C8B, info), \ + INTEL_VGA_DEVICE(0x4C8C, info), \ + INTEL_VGA_DEVICE(0x4C90, info), \ + INTEL_VGA_DEVICE(0x4C9A, info) + +/* DG1 */ +#define INTEL_DG1_IDS(info) \ + INTEL_VGA_DEVICE(0x4905, info) + +#endif /* _I915_PCIIDS_H */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h new file mode 100644 index 000000000..abfefaaf8 --- /dev/null +++ b/include/drm/intel-gtt.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Common header for intel-gtt.ko and i915.ko */ + +#ifndef _DRM_INTEL_GTT_H +#define _DRM_INTEL_GTT_H + +#include <linux/agp_backend.h> +#include <linux/intel-iommu.h> +#include <linux/kernel.h> + +void intel_gtt_get(u64 *gtt_total, + phys_addr_t *mappable_base, + resource_size_t *mappable_end); + +int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, + struct agp_bridge_data *bridge); +void intel_gmch_remove(void); + +bool intel_enable_gtt(void); + +void intel_gtt_chipset_flush(void); +void intel_gtt_insert_page(dma_addr_t addr, + unsigned int pg, + unsigned int flags); +void intel_gtt_insert_sg_entries(struct sg_table *st, + unsigned int pg_start, + unsigned int flags); +void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); + +/* Special gtt memory types */ +#define AGP_DCACHE_MEMORY 1 +#define AGP_PHYS_MEMORY 2 + +/* flag for GFDT type */ +#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) + +#endif diff --git a/include/drm/intel_lpe_audio.h b/include/drm/intel_lpe_audio.h new file mode 100644 index 000000000..b6121c8fe --- /dev/null +++ b/include/drm/intel_lpe_audio.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2016 Intel Corporation + * + * 
Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _INTEL_LPE_AUDIO_H_ +#define _INTEL_LPE_AUDIO_H_ + +#include <linux/types.h> +#include <linux/spinlock_types.h> + +struct platform_device; + +#define HDMI_MAX_ELD_BYTES 128 + +struct intel_hdmi_lpe_audio_port_pdata { + u8 eld[HDMI_MAX_ELD_BYTES]; + int port; + int pipe; + int ls_clock; + bool dp_output; +}; + +struct intel_hdmi_lpe_audio_pdata { + struct intel_hdmi_lpe_audio_port_pdata port[3]; /* for ports B,C,D */ + int num_ports; + int num_pipes; + + void (*notify_audio_lpe)(struct platform_device *pdev, int port); /* port: 0==B,1==C,2==D */ + spinlock_t lpe_audio_slock; +}; + +#endif /* _I915_LPE_AUDIO_H_ */ diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h new file mode 100644 index 000000000..125f096c8 --- /dev/null +++ b/include/drm/spsc_queue.h @@ -0,0 +1,122 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
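A hedged sketch of how the display driver might fill one port slot of the LPE audio platform data defined above and notify the audio driver; the function name, parameters and assumed includes (<linux/platform_device.h>, <linux/spinlock.h>, <linux/string.h>) are illustrative:

static void my_lpe_audio_notify(struct platform_device *lpe_pdev,
				struct intel_hdmi_lpe_audio_pdata *pdata,
				const u8 *eld, int port, int pipe,
				int ls_clock, bool dp_output)
{
	struct intel_hdmi_lpe_audio_port_pdata *ppdata = &pdata->port[port];
	unsigned long flags;

	spin_lock_irqsave(&pdata->lpe_audio_slock, flags);
	memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
	ppdata->port = port;		/* 0 == B, 1 == C, 2 == D */
	ppdata->pipe = pipe;
	ppdata->ls_clock = ls_clock;
	ppdata->dp_output = dp_output;

	if (pdata->notify_audio_lpe)
		pdata->notify_audio_lpe(lpe_pdev, port);
	spin_unlock_irqrestore(&pdata->lpe_audio_slock, flags);
}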
+ * + */ + +#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_ +#define DRM_SCHEDULER_SPSC_QUEUE_H_ + +#include <linux/atomic.h> +#include <linux/preempt.h> + +/** SPSC lockless queue */ + +struct spsc_node { + + /* Stores spsc_node* */ + struct spsc_node *next; +}; + +struct spsc_queue { + + struct spsc_node *head; + + /* atomic pointer to struct spsc_node* */ + atomic_long_t tail; + + atomic_t job_count; +}; + +static inline void spsc_queue_init(struct spsc_queue *queue) +{ + queue->head = NULL; + atomic_long_set(&queue->tail, (long)&queue->head); + atomic_set(&queue->job_count, 0); +} + +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) +{ + return queue->head; +} + +static inline int spsc_queue_count(struct spsc_queue *queue) +{ + return atomic_read(&queue->job_count); +} + +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) +{ + struct spsc_node **tail; + + node->next = NULL; + + preempt_disable(); + + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); + atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer + * thread when we ping the kernel thread that there is new work to do. + */ + smp_wmb(); + + preempt_enable(); + + return tail == &queue->head; +} + + +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) +{ + struct spsc_node *next, *node; + + /* Verify reading from memory and not the cache */ + smp_rmb(); + + node = READ_ONCE(queue->head); + + if (!node) + return NULL; + + next = READ_ONCE(node->next); + WRITE_ONCE(queue->head, next); + + if (unlikely(!next)) { + /* slowpath for the last element in the queue */ + + if (atomic_long_cmpxchg(&queue->tail, + (long)&node->next, (long) &queue->head) != (long)&node->next) { + /* Updating tail failed wait for new next to appear */ + do { + smp_rmb(); + } while (unlikely(!(queue->head = READ_ONCE(node->next)))); + } + } + + atomic_dec(&queue->job_count); + return node; +} + + + +#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */ diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h new file mode 100644 index 000000000..087e3f649 --- /dev/null +++ b/include/drm/task_barrier.h @@ -0,0 +1,107 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
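The return value of spsc_queue_push() above tells the single producer whether the queue was empty beforehand, i.e. whether the single consumer may need a wake-up. A hedged usage sketch with an invented work item embedding the node:

struct my_work {
	struct spsc_node node;	/* embedded link; the queue stores only these */
	int payload;
};

static struct spsc_queue my_queue;	/* spsc_queue_init(&my_queue) once at setup */

/* Producer side (single thread). Returns true if the consumer was idle
 * and may need a wake-up, mirroring how the scheduler wakes its kthread. */
static bool my_push(struct my_work *w)
{
	return spsc_queue_push(&my_queue, &w->node);
}

/* Consumer side (single thread). */
static struct my_work *my_pop(void)
{
	struct spsc_node *node = spsc_queue_pop(&my_queue);

	return node ? container_of(node, struct my_work, node) : NULL;
}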
+ * + */ +#include <linux/semaphore.h> +#include <linux/atomic.h> + +/* + * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks. + * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/ + */ + + + +#ifndef DRM_TASK_BARRIER_H_ +#define DRM_TASK_BARRIER_H_ + +/* + * Represents an instance of a task barrier. + */ +struct task_barrier { + unsigned int n; + atomic_t count; + struct semaphore enter_turnstile; + struct semaphore exit_turnstile; +}; + +static inline void task_barrier_signal_turnstile(struct semaphore *turnstile, + unsigned int n) +{ + int i; + + for (i = 0 ; i < n; i++) + up(turnstile); +} + +static inline void task_barrier_init(struct task_barrier *tb) +{ + tb->n = 0; + atomic_set(&tb->count, 0); + sema_init(&tb->enter_turnstile, 0); + sema_init(&tb->exit_turnstile, 0); +} + +static inline void task_barrier_add_task(struct task_barrier *tb) +{ + tb->n++; +} + +static inline void task_barrier_rem_task(struct task_barrier *tb) +{ + tb->n--; +} + +/* + * Lines up all the threads BEFORE the critical point. + * + * When all thread passed this code the entry barrier is back to locked state. + */ +static inline void task_barrier_enter(struct task_barrier *tb) +{ + if (atomic_inc_return(&tb->count) == tb->n) + task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n); + + down(&tb->enter_turnstile); +} + +/* + * Lines up all the threads AFTER the critical point. + * + * This function is used to avoid any one thread running ahead if the barrier is + * used repeatedly . + */ +static inline void task_barrier_exit(struct task_barrier *tb) +{ + if (atomic_dec_return(&tb->count) == 0) + task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n); + + down(&tb->exit_turnstile); +} + +/* Convinieince function when nothing to be done in between entry and exit */ +static inline void task_barrier_full(struct task_barrier *tb) +{ + task_barrier_enter(tb); + task_barrier_exit(tb); +} + +#endif diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h new file mode 100644 index 000000000..09ccfee48 --- /dev/null +++ b/include/drm/ttm/ttm_bo_api.h @@ -0,0 +1,663 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
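A hedged sketch of the intended two-phase usage of the barrier above: register every participant first, then have each participant bracket its critical section with the enter/exit pair (or task_barrier_full()); my_worker() stands in for, e.g., a per-node reset thread:

static struct task_barrier my_barrier;

static void my_barrier_setup(unsigned int nr_tasks)
{
	unsigned int i;

	task_barrier_init(&my_barrier);
	for (i = 0; i < nr_tasks; i++)
		task_barrier_add_task(&my_barrier);
}

/* Body run by each of the nr_tasks participants. */
static void my_worker(void)
{
	task_barrier_enter(&my_barrier);	/* blocks until all tasks arrive */
	/* ... work that must start together on all tasks ... */
	task_barrier_exit(&my_barrier);		/* blocks until all tasks are done */
}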
+ * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#ifndef _TTM_BO_API_H_ +#define _TTM_BO_API_H_ + +#include <drm/drm_gem.h> +#include <drm/drm_hashtab.h> +#include <drm/drm_vma_manager.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/mutex.h> +#include <linux/mm.h> +#include <linux/bitmap.h> +#include <linux/dma-resv.h> + +#include "ttm_resource.h" + +struct ttm_bo_global; + +struct ttm_bo_device; + +struct drm_mm_node; + +struct ttm_placement; + +struct ttm_place; + +struct ttm_lru_bulk_move; + +/** + * enum ttm_bo_type + * + * @ttm_bo_type_device: These are 'normal' buffers that can + * be mmapped by user space. Each of these bos occupy a slot in the + * device address space, that can be used for normal vm operations. + * + * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, + * but they cannot be accessed from user-space. For kernel-only use. + * + * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another + * driver. + */ + +enum ttm_bo_type { + ttm_bo_type_device, + ttm_bo_type_kernel, + ttm_bo_type_sg +}; + +struct ttm_tt; + +/** + * struct ttm_buffer_object + * + * @base: drm_gem_object superclass data. + * @bdev: Pointer to the buffer object device structure. + * @type: The bo type. + * @destroy: Destruction function. If NULL, kfree is used. + * @num_pages: Actual number of pages. + * @acc_size: Accounted size for this object. + * @kref: Reference count of this buffer object. When this refcount reaches + * zero, the object is destroyed or put on the delayed delete list. + * @mem: structure describing current placement. + * @persistent_swap_storage: Usually the swap storage is deleted for buffers + * pinned in physical memory. If this behaviour is not desired, this member + * holds a pointer to a persistent shmem object. + * @ttm: TTM structure holding system pages. + * @evicted: Whether the object was evicted without user-space knowing. + * @deleted: True if the object is only a zombie and already deleted. + * @lru: List head for the lru list. + * @ddestroy: List head for the delayed destroy list. + * @swap: List head for swap LRU list. + * @moving: Fence set when BO is moving + * @offset: The current GPU offset, which can have different meanings + * depending on the memory type. For SYSTEM type memory, it should be 0. + * @cur_placement: Hint of current placement. + * + * Base class for TTM buffer object, that deals with data placement and CPU + * mappings. GPU mappings are really up to the driver, but for simpler GPUs + * the driver can usually use the placement offset @offset directly as the + * GPU virtual address. For drivers implementing multiple + * GPU memory manager contexts, the driver should manage the address space + * in these contexts separately and use these objects to get the correct + * placement and caching for these GPU maps. This makes it possible to use + * these objects for even quite elaborate memory management schemes. + * The destroy member, the API visibility of this object makes it possible + * to derive driver specific types. + */ + +struct ttm_buffer_object { + struct drm_gem_object base; + + /** + * Members constant at init. + */ + + struct ttm_bo_device *bdev; + enum ttm_bo_type type; + void (*destroy) (struct ttm_buffer_object *); + unsigned long num_pages; + size_t acc_size; + + /** + * Members not needing protection. 
+ */ + struct kref kref; + + /** + * Members protected by the bo::resv::reserved lock. + */ + + struct ttm_resource mem; + struct file *persistent_swap_storage; + struct ttm_tt *ttm; + bool deleted; + + /** + * Members protected by the bdev::lru_lock. + */ + + struct list_head lru; + struct list_head ddestroy; + struct list_head swap; + + /** + * Members protected by a bo reservation. + */ + + struct dma_fence *moving; + unsigned priority; + unsigned pin_count; + + /** + * Special members that are protected by the reserve lock + * and the bo::lock when written to. Can be read with + * either of these locks held. + */ + + struct sg_table *sg; +}; + +/** + * struct ttm_bo_kmap_obj + * + * @virtual: The current kernel virtual address. + * @page: The page when kmap'ing a single page. + * @bo_kmap_type: Type of bo_kmap. + * + * Object describing a kernel mapping. Since a TTM bo may be located + * in various memory types with various caching policies, the + * mapping can either be an ioremap, a vmap, a kmap or part of a + * premapped region. + */ + +#define TTM_BO_MAP_IOMEM_MASK 0x80 +struct ttm_bo_kmap_obj { + void *virtual; + struct page *page; + enum { + ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK, + ttm_bo_map_vmap = 2, + ttm_bo_map_kmap = 3, + ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, + } bo_kmap_type; + struct ttm_buffer_object *bo; +}; + +/** + * struct ttm_operation_ctx + * + * @interruptible: Sleep interruptible if sleeping. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @resv: Reservation object to allow reserved evictions with. + * @flags: Including the following flags + * + * Context for TTM operations like changing buffer placement or general memory + * allocation. + */ +struct ttm_operation_ctx { + bool interruptible; + bool no_wait_gpu; + struct dma_resv *resv; + uint64_t bytes_moved; + uint32_t flags; +}; + +/* Allow eviction of reserved BOs */ +#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1 +/* when serving page fault or suspend, allow alloc anyway */ +#define TTM_OPT_FLAG_FORCE_ALLOC 0x2 + +/** + * ttm_bo_get - reference a struct ttm_buffer_object + * + * @bo: The buffer object. + */ +static inline void ttm_bo_get(struct ttm_buffer_object *bo) +{ + kref_get(&bo->kref); +} + +/** + * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless + * its refcount has already reached zero. + * @bo: The buffer object. + * + * Used to reference a TTM buffer object in lookups where the object is removed + * from the lookup structure during the destructor and for RCU lookups. + * + * Returns: @bo if the referencing was successful, NULL otherwise. + */ +static inline __must_check struct ttm_buffer_object * +ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) +{ + if (!kref_get_unless_zero(&bo->kref)) + return NULL; + return bo; +} + +/** + * ttm_bo_wait - wait for buffer idle. + * + * @bo: The buffer object. + * @interruptible: Use interruptible wait. + * @no_wait: Return immediately if buffer is busy. + * + * This function must be called with the bo::mutex held, and makes + * sure any previous rendering to the buffer is completed. + * Note: It might be necessary to block validations before the + * wait by reserving the buffer. + * Returns -EBUSY if no_wait is true and the buffer is busy. + * Returns -ERESTARTSYS if interrupted by a signal. 
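A hedged sketch of the lookup pattern ttm_bo_get_unless_zero() is meant for; the idr table, its lock and my_dev are invented (assumes <linux/idr.h> and <linux/spinlock.h>), and the reference taken here is later dropped with ttm_bo_put(), declared below:

struct my_dev {
	spinlock_t lock;
	struct idr bo_idr;	/* hypothetical handle -> BO table */
};

static struct ttm_buffer_object *my_bo_lookup(struct my_dev *mdev, u32 handle)
{
	struct ttm_buffer_object *bo;

	spin_lock(&mdev->lock);
	bo = idr_find(&mdev->bo_idr, handle);
	if (bo)
		bo = ttm_bo_get_unless_zero(bo);	/* NULL if already being destroyed */
	spin_unlock(&mdev->lock);

	return bo;	/* drop with ttm_bo_put() when done */
}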
+ */ +int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); + +/** + * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo + * + * @placement: Return immediately if buffer is busy. + * @mem: The struct ttm_resource indicating the region where the bo resides + * @new_flags: Describes compatible placement found + * + * Returns true if the placement is compatible + */ +bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem, + uint32_t *new_flags); + +/** + * ttm_bo_validate + * + * @bo: The buffer object. + * @placement: Proposed placement for the buffer object. + * @ctx: validation parameters. + * + * Changes placement and caching policy of the buffer object + * according proposed placement. + * Returns + * -EINVAL on invalid proposed placement. + * -ENOMEM on out-of-memory condition. + * -EBUSY if no_wait is true and buffer busy. + * -ERESTARTSYS if interrupted by a signal. + */ +int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_operation_ctx *ctx); + +/** + * ttm_bo_put + * + * @bo: The buffer object. + * + * Unreference a buffer object. + */ +void ttm_bo_put(struct ttm_buffer_object *bo); + +/** + * ttm_bo_move_to_lru_tail + * + * @bo: The buffer object. + * @bulk: optional bulk move structure to remember BO positions + * + * Move this BO to the tail of all lru lists used to lookup and reserve an + * object. This function must be called with struct ttm_bo_global::lru_lock + * held, and is used to make a BO less likely to be considered for eviction. + */ +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_lru_bulk_move *bulk); + +/** + * ttm_bo_bulk_move_lru_tail + * + * @bulk: bulk move structure + * + * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that + * BO order never changes. Should be called with ttm_bo_global::lru_lock held. + */ +void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); + +/** + * ttm_bo_lock_delayed_workqueue + * + * Prevent the delayed workqueue from running. + * Returns + * True if the workqueue was queued at the time + */ +int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); + +/** + * ttm_bo_unlock_delayed_workqueue + * + * Allows the delayed workqueue to run. + */ +void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); + +/** + * ttm_bo_eviction_valuable + * + * @bo: The buffer object to evict + * @place: the placement we need to make room for + * + * Check if it is valuable to evict the BO to make room for the given placement. + */ +bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, + const struct ttm_place *place); + +size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, + unsigned long bo_size, + unsigned struct_size); + +/** + * ttm_bo_init_reserved + * + * @bdev: Pointer to a ttm_bo_device struct. + * @bo: Pointer to a ttm_buffer_object to be initialized. + * @size: Requested size of buffer object. + * @type: Requested type of buffer object. + * @flags: Initial placement flags. + * @page_alignment: Data alignment in pages. + * @ctx: TTM operation context for memory allocation. + * @acc_size: Accounted size for this object. + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. + * @destroy: Destroy function. Use NULL for kfree(). + * + * This function initializes a pre-allocated struct ttm_buffer_object. 
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref is
+ * usually set to 1, but note that in some situations, other tasks may
+ * already be holding references to @bo as well.
+ * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
+ * and it is the caller's responsibility to call ttm_bo_unreserve.
+ *
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+
+int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
+			 struct ttm_buffer_object *bo,
+			 unsigned long size,
+			 enum ttm_bo_type type,
+			 struct ttm_placement *placement,
+			 uint32_t page_alignment,
+			 struct ttm_operation_ctx *ctx,
+			 size_t acc_size,
+			 struct sg_table *sg,
+			 struct dma_resv *resv,
+			 void (*destroy) (struct ttm_buffer_object *));
+
+/**
+ * ttm_bo_init
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @acc_size: Accounted size for this object.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref is
+ * usually set to 1, but note that in some situations, other tasks may
+ * already be holding references to @bo as well.
+ *
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
+		unsigned long size, enum ttm_bo_type type,
+		struct ttm_placement *placement,
+		uint32_t page_alignment, bool interruptible, size_t acc_size,
+		struct sg_table *sg, struct dma_resv *resv,
+		void (*destroy) (struct ttm_buffer_object *));
+
+/**
+ * ttm_bo_create
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep while waiting for GPU resources, + * sleep interruptible. + * @p_bo: On successful completion *p_bo points to the created object. + * + * This function allocates a ttm_buffer_object, and then calls ttm_bo_init + * on that object. The destroy function is set to kfree(). + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid placement flags. + * -ERESTARTSYS: Interrupted by signal while waiting for resources. + */ +int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, + enum ttm_bo_type type, struct ttm_placement *placement, + uint32_t page_alignment, bool interruptible, + struct ttm_buffer_object **p_bo); + +/** + * ttm_bo_evict_mm + * + * @bdev: Pointer to a ttm_bo_device struct. + * @mem_type: The memory type. + * + * Evicts all buffers on the lru list of the memory type. + * This is normally part of a VT switch or an + * out-of-memory-space-due-to-fragmentation handler. + * The caller must make sure that there are no other processes + * currently validating buffers, and can do that by taking the + * struct ttm_bo_device::ttm_lock in write mode. + * + * Returns: + * -EINVAL: Invalid or uninitialized memory type. + * -ERESTARTSYS: The call was interrupted by a signal while waiting to + * evict a buffer. + */ +int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); + +/** + * ttm_kmap_obj_virtual + * + * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap. + * @is_iomem: Pointer to an integer that on return indicates 1 if the + * virtual map is io memory, 0 if normal memory. + * + * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap. + * If *is_iomem is 1 on return, the virtual address points to an io memory area, + * that should strictly be accessed by the iowriteXX() and similar functions. + */ +static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, + bool *is_iomem) +{ + *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK); + return map->virtual; +} + +/** + * ttm_bo_kmap + * + * @bo: The buffer object. + * @start_page: The first page to map. + * @num_pages: Number of pages to map. + * @map: pointer to a struct ttm_bo_kmap_obj representing the map. + * + * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the + * data in the buffer object. The ttm_kmap_obj_virtual function can then be + * used to obtain a virtual address to the data. + * + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid range. + */ +int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct ttm_bo_kmap_obj *map); + +/** + * ttm_bo_kunmap + * + * @map: Object describing the map to unmap. + * + * Unmaps a kernel map set up by ttm_bo_kmap. + */ +void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); + +/** + * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object. + * + * @vma: vma as input from the fbdev mmap method. + * @bo: The bo backing the address space. + * + * Maps a buffer object. + */ +int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); + +/** + * ttm_bo_mmap - mmap out of the ttm device address space. + * + * @filp: filp as input from the mmap method. + * @vma: vma as input from the mmap method. + * @bdev: Pointer to the ttm_bo_device with the address space manager. + * + * This function is intended to be called by the device mmap method. + * if the device address space is to be backed by the bo manager. 
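+ *
+ * A minimal sketch of such an mmap method (illustrative; struct my_device,
+ * my_driver_mmap and the use of filp->private_data are hypothetical driver
+ * conventions, not part of this API):
+ *
+ *	static int my_driver_mmap(struct file *filp, struct vm_area_struct *vma)
+ *	{
+ *		struct my_device *mdev = filp->private_data;
+ *
+ *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
+ *	}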
+ */ +int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, + struct ttm_bo_device *bdev); + +/** + * ttm_bo_io + * + * @bdev: Pointer to the struct ttm_bo_device. + * @filp: Pointer to the struct file attempting to read / write. + * @wbuf: User-space pointer to address of buffer to write. NULL on read. + * @rbuf: User-space pointer to address of buffer to read into. + * Null on write. + * @count: Number of bytes to read / write. + * @f_pos: Pointer to current file position. + * @write: 1 for read, 0 for write. + * + * This function implements read / write into ttm buffer objects, and is + * intended to + * be called from the fops::read and fops::write method. + * Returns: + * See man (2) write, man(2) read. In particular, + * the function may return -ERESTARTSYS if + * interrupted by a signal. + */ +ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, + const char __user *wbuf, char __user *rbuf, + size_t count, loff_t *f_pos, bool write); + +int ttm_bo_swapout(struct ttm_bo_global *glob, + struct ttm_operation_ctx *ctx); +void ttm_bo_swapout_all(void); + +/** + * ttm_bo_uses_embedded_gem_object - check if the given bo uses the + * embedded drm_gem_object. + * + * Most ttm drivers are using gem too, so the embedded + * ttm_buffer_object.base will be initialized by the driver (before + * calling ttm_bo_init). It is also possible to use ttm without gem + * though (vmwgfx does that). + * + * This helper will figure whenever a given ttm bo is a gem object too + * or not. + * + * @bo: The bo to check. + */ +static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo) +{ + return bo->base.dev != NULL; +} + +/** + * ttm_bo_pin - Pin the buffer object. + * @bo: The buffer object to pin + * + * Make sure the buffer is not evicted any more during memory pressure. + */ +static inline void ttm_bo_pin(struct ttm_buffer_object *bo) +{ + dma_resv_assert_held(bo->base.resv); + ++bo->pin_count; +} + +/** + * ttm_bo_unpin - Unpin the buffer object. + * @bo: The buffer object to unpin + * + * Allows the buffer object to be evicted again during memory pressure. + */ +static inline void ttm_bo_unpin(struct ttm_buffer_object *bo) +{ + dma_resv_assert_held(bo->base.resv); + if (bo->pin_count) + --bo->pin_count; + else + WARN_ON_ONCE(true); +} + +int ttm_mem_evict_first(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man, + const struct ttm_place *place, + struct ttm_operation_ctx *ctx, + struct ww_acquire_ctx *ticket); + +/* Default number of pre-faulted pages in the TTM fault handler */ +#define TTM_BO_VM_NUM_PREFAULT 16 + +vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, + struct vm_fault *vmf); + +vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, + pgprot_t prot, + pgoff_t num_prefault, + pgoff_t fault_page_size); + +vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf); + +void ttm_bo_vm_open(struct vm_area_struct *vma); + +void ttm_bo_vm_close(struct vm_area_struct *vma); + +int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); + +#endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h new file mode 100644 index 000000000..864afa8f6 --- /dev/null +++ b/include/drm/ttm/ttm_bo_driver.h @@ -0,0 +1,725 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ +#ifndef _TTM_BO_DRIVER_H_ +#define _TTM_BO_DRIVER_H_ + +#include <drm/drm_mm.h> +#include <drm/drm_vma_manager.h> +#include <linux/workqueue.h> +#include <linux/fs.h> +#include <linux/spinlock.h> +#include <linux/dma-resv.h> + +#include "ttm_bo_api.h" +#include "ttm_memory.h" +#include "ttm_module.h" +#include "ttm_placement.h" +#include "ttm_tt.h" + +/** + * struct ttm_bo_driver + * + * @create_ttm_backend_entry: Callback to create a struct ttm_backend. + * @evict_flags: Callback to obtain placement flags when a buffer is evicted. + * @move: Callback for a driver to hook in accelerated functions to + * move a buffer. + * If set to NULL, a potentially slow memcpy() move is used. + */ + +struct ttm_bo_driver { + /** + * ttm_tt_create + * + * @bo: The buffer object to create the ttm for. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * + * Create a struct ttm_tt to back data with system memory pages. + * No pages are actually allocated. + * Returns: + * NULL: Out of memory. + */ + struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo, + uint32_t page_flags); + + /** + * ttm_tt_populate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Allocate all backing pages + * Returns: + * -ENOMEM: Out of memory. + */ + int (*ttm_tt_populate)(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); + + /** + * ttm_tt_unpopulate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Free all backing page + */ + void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); + + /** + * ttm_tt_bind + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * @bo_mem: Pointer to a struct ttm_resource describing the + * memory type and location for binding. + * + * Bind the backend pages into the aperture in the location + * indicated by @bo_mem. This function should be able to handle + * differences between aperture and system page sizes. + */ + int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem); + + /** + * ttm_tt_unbind + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * + * Unbind previously bound backend pages. 
This function should be + * able to handle differences between aperture and system page sizes. + */ + void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); + + /** + * ttm_tt_destroy + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * + * Destroy the backend. This will be call back from ttm_tt_destroy so + * don't call ttm_tt_destroy from the callback or infinite loop. + */ + void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); + + /** + * struct ttm_bo_driver member eviction_valuable + * + * @bo: the buffer object to be evicted + * @place: placement we need room for + * + * Check with the driver if it is valuable to evict a BO to make room + * for a certain placement. + */ + bool (*eviction_valuable)(struct ttm_buffer_object *bo, + const struct ttm_place *place); + /** + * struct ttm_bo_driver member evict_flags: + * + * @bo: the buffer object to be evicted + * + * Return the bo flags for a buffer which is not mapped to the hardware. + * These will be placed in proposed_flags so that when the move is + * finished, they'll end up in bo->mem.flags + */ + + void (*evict_flags)(struct ttm_buffer_object *bo, + struct ttm_placement *placement); + + /** + * struct ttm_bo_driver member move: + * + * @bo: the buffer to move + * @evict: whether this motion is evicting the buffer from + * the graphics address space + * @ctx: context for this move with parameters + * @new_mem: the new memory region receiving the buffer + * + * Move a buffer between two memory regions. + */ + int (*move)(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem); + + /** + * struct ttm_bo_driver_member verify_access + * + * @bo: Pointer to a buffer object. + * @filp: Pointer to a struct file trying to access the object. + * + * Called from the map / write / read methods to verify that the + * caller is permitted to access the buffer object. + * This member may be set to NULL, which will refuse this kind of + * access for all buffer objects. + * This function should return 0 if access is granted, -EPERM otherwise. + */ + int (*verify_access)(struct ttm_buffer_object *bo, + struct file *filp); + + /** + * Hook to notify driver about a driver move so it + * can do tiling things and book-keeping. + * + * @evict: whether this move is evicting the buffer from the graphics + * address space + */ + void (*move_notify)(struct ttm_buffer_object *bo, + bool evict, + struct ttm_resource *new_mem); + /* notify the driver we are taking a fault on this BO + * and have reserved it */ + int (*fault_reserve_notify)(struct ttm_buffer_object *bo); + + /** + * notify the driver that we're about to swap out this bo + */ + void (*swap_notify)(struct ttm_buffer_object *bo); + + /** + * Driver callback on when mapping io memory (for bo_move_memcpy + * for instance). TTM will take care to call io_mem_free whenever + * the mapping is not use anymore. io_mem_reserve & io_mem_free + * are balanced. + */ + int (*io_mem_reserve)(struct ttm_bo_device *bdev, + struct ttm_resource *mem); + void (*io_mem_free)(struct ttm_bo_device *bdev, + struct ttm_resource *mem); + + /** + * Return the pfn for a given page_offset inside the BO. 
+ * + * @bo: the BO to look up the pfn for + * @page_offset: the offset to look up + */ + unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, + unsigned long page_offset); + + /** + * Read/write memory buffers for ptrace access + * + * @bo: the BO to access + * @offset: the offset from the start of the BO + * @buf: pointer to source/destination buffer + * @len: number of bytes to copy + * @write: whether to read (0) from or write (non-0) to BO + * + * If successful, this function should return the number of + * bytes copied, -EIO otherwise. If the number of bytes + * returned is < len, the function may be called again with + * the remainder of the buffer to copy. + */ + int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); + + /** + * struct ttm_bo_driver member del_from_lru_notify + * + * @bo: the buffer object deleted from lru + * + * notify driver that a BO was deleted from LRU. + */ + void (*del_from_lru_notify)(struct ttm_buffer_object *bo); + + /** + * Notify the driver that we're about to release a BO + * + * @bo: BO that is about to be released + * + * Gives the driver a chance to do any cleanup, including + * adding fences that may force a delayed delete + */ + void (*release_notify)(struct ttm_buffer_object *bo); +}; + +/** + * struct ttm_bo_global - Buffer object driver global data. + * + * @dummy_read_page: Pointer to a dummy page used for mapping requests + * of unpopulated pages. + * @shrink: A shrink callback object used for buffer object swap. + * @device_list_mutex: Mutex protecting the device list. + * This mutex is held while traversing the device list for pm options. + * @lru_lock: Spinlock protecting the bo subsystem lru lists. + * @device_list: List of buffer object devices. + * @swap_lru: Lru list of buffer objects used for swapping. + */ + +extern struct ttm_bo_global { + + /** + * Constant after init. + */ + + struct kobject kobj; + struct page *dummy_read_page; + spinlock_t lru_lock; + + /** + * Protected by ttm_global_mutex. + */ + struct list_head device_list; + + /** + * Protected by the lru_lock. + */ + struct list_head swap_lru[TTM_MAX_BO_PRIORITY]; + + /** + * Internal protection. + */ + atomic_t bo_count; +} ttm_bo_glob; + + +#define TTM_NUM_MEM_TYPES 8 + +/** + * struct ttm_bo_device - Buffer object driver device-specific data. + * + * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. + * @man: An array of resource_managers. + * @vma_manager: Address space manager (pointer) + * lru_lock: Spinlock that protects the buffer+device lru lists and + * ddestroy lists. + * @dev_mapping: A pointer to the struct address_space representing the + * device address space. + * @wq: Work queue structure for the delayed delete workqueue. + * @no_retry: Don't retry allocation if it fails + * + */ + +struct ttm_bo_device { + + /* + * Constant after bo device init / atomic. + */ + struct list_head device_list; + struct ttm_bo_driver *driver; + /* + * access via ttm_manager_type. + */ + struct ttm_resource_manager sysman; + struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; + /* + * Protected by internal locks. + */ + struct drm_vma_offset_manager *vma_manager; + + /* + * Protected by the global:lru lock. + */ + struct list_head ddestroy; + + /* + * Protected by load / firstopen / lastclose /unload sync. + */ + + struct address_space *dev_mapping; + + /* + * Internal protection. 
+ */ + + struct delayed_work wq; + + bool need_dma32; + + bool no_retry; +}; + +static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev, + int mem_type) +{ + return bdev->man_drv[mem_type]; +} + +static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev, + int type, + struct ttm_resource_manager *manager) +{ + bdev->man_drv[type] = manager; +} + +/** + * struct ttm_lru_bulk_move_pos + * + * @first: first BO in the bulk move range + * @last: last BO in the bulk move range + * + * Positions for a lru bulk move. + */ +struct ttm_lru_bulk_move_pos { + struct ttm_buffer_object *first; + struct ttm_buffer_object *last; +}; + +/** + * struct ttm_lru_bulk_move + * + * @tt: first/last lru entry for BOs in the TT domain + * @vram: first/last lru entry for BOs in the VRAM domain + * @swap: first/last lru entry for BOs on the swap list + * + * Helper structure for bulk moves on the LRU list. + */ +struct ttm_lru_bulk_move { + struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY]; + struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY]; + struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY]; +}; + +/* + * ttm_bo.c + */ + +/** + * ttm_bo_mem_space + * + * @bo: Pointer to a struct ttm_buffer_object. the data of which + * we want to allocate space for. + * @proposed_placement: Proposed new placement for the buffer object. + * @mem: A struct ttm_resource. + * @interruptible: Sleep interruptible when sliping. + * @no_wait_gpu: Return immediately if the GPU is busy. + * + * Allocate memory space for the buffer object pointed to by @bo, using + * the placement flags in @mem, potentially evicting other idle buffer objects. + * This function may sleep while waiting for space to become available. + * Returns: + * -EBUSY: No space available (only if no_wait == 1). + * -ENOMEM: Could not allocate memory for the buffer object, either due to + * fragmentation or concurrent allocators. + * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. + */ +int ttm_bo_mem_space(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_resource *mem, + struct ttm_operation_ctx *ctx); + +int ttm_bo_device_release(struct ttm_bo_device *bdev); + +/** + * ttm_bo_device_init + * + * @bdev: A pointer to a struct ttm_bo_device to initialize. + * @glob: A pointer to an initialized struct ttm_bo_global. + * @driver: A pointer to a struct ttm_bo_driver set up by the caller. + * @mapping: The address space to use for this bo. + * @vma_manager: A pointer to a vma manager. + * @file_page_offset: Offset into the device address space that is available + * for buffer data. This ensures compatibility with other users of the + * address space. + * + * Initializes a struct ttm_bo_device: + * Returns: + * !0: Failure. + */ +int ttm_bo_device_init(struct ttm_bo_device *bdev, + struct ttm_bo_driver *driver, + struct address_space *mapping, + struct drm_vma_offset_manager *vma_manager, + bool need_dma32); + +/** + * ttm_bo_unmap_virtual + * + * @bo: tear down the virtual mappings for this BO + */ +void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); + +/** + * ttm_bo_unmap_virtual + * + * @bo: tear down the virtual mappings for this BO + * + * The caller must take ttm_mem_io_lock before calling this function. + */ +void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); + +/** + * ttm_bo_reserve: + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. 
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. + * @ticket: ticket used to acquire the ww_mutex. + * + * Locks a buffer object for validation. (Or prevents other processes from + * locking it for validation), while taking a number of measures to prevent + * deadlocks. + * + * Returns: + * -EDEADLK: The reservation may cause a deadlock. + * Release all buffer reservations, wait for @bo to become unreserved and + * try again. + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. + * -EBUSY: The function needed to sleep, but @no_wait was true + * -EALREADY: Bo already reserved using @ticket. This error code will only + * be returned if @use_ticket is set to true. + */ +static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait, + struct ww_acquire_ctx *ticket) +{ + int ret = 0; + + if (no_wait) { + bool success; + if (WARN_ON(ticket)) + return -EBUSY; + + success = dma_resv_trylock(bo->base.resv); + return success ? 0 : -EBUSY; + } + + if (interruptible) + ret = dma_resv_lock_interruptible(bo->base.resv, ticket); + else + ret = dma_resv_lock(bo->base.resv, ticket); + if (ret == -EINTR) + return -ERESTARTSYS; + return ret; +} + +/** + * ttm_bo_reserve_slowpath: + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @sequence: Set (@bo)->sequence to this value after lock + * + * This is called after ttm_bo_reserve returns -EAGAIN and we backed off + * from all our other reservations. Because there are no other reservations + * held by us, this function cannot deadlock any more. + */ +static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, + bool interruptible, + struct ww_acquire_ctx *ticket) +{ + if (interruptible) { + int ret = dma_resv_lock_slow_interruptible(bo->base.resv, + ticket); + if (ret == -EINTR) + ret = -ERESTARTSYS; + return ret; + } + dma_resv_lock_slow(bo->base.resv, ticket); + return 0; +} + +static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) +{ + spin_lock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail(bo, NULL); + spin_unlock(&ttm_bo_glob.lru_lock); +} + +static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem) +{ + bo->mem = *new_mem; + new_mem->mm_node = NULL; +} + +/** + * ttm_bo_move_null = assign memory for a buffer object. + * @bo: The bo to assign the memory to + * @new_mem: The memory to be assigned. + * + * Assign the memory from new_mem to the memory of the buffer object bo. + */ +static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, + struct ttm_resource *new_mem) +{ + struct ttm_resource *old_mem = &bo->mem; + + WARN_ON(old_mem->mm_node != NULL); + ttm_bo_assign_mem(bo, new_mem); +} + +/** + * ttm_bo_unreserve + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Unreserve a previous reservation of @bo. + */ +static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) +{ + ttm_bo_move_to_lru_tail_unlocked(bo); + dma_resv_unlock(bo->base.resv); +} + +/* + * ttm_bo_util.c + */ + +int ttm_mem_io_reserve(struct ttm_bo_device *bdev, + struct ttm_resource *mem); +void ttm_mem_io_free(struct ttm_bo_device *bdev, + struct ttm_resource *mem); +/** + * ttm_bo_move_ttm + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait_gpu: Return immediately if the GPU is busy. 
+ * @new_mem: struct ttm_resource indicating where to move. + * + * Optimized move function for a buffer object with both old and + * new placement backed by a TTM. The function will, if successful, + * free any old aperture space, and set (@new_mem)->mm_node to NULL, + * and update the (@bo)->mem placement flags. If unsuccessful, the old + * data remains untouched, and it's up to the caller to free the + * memory space indicated by @new_mem. + * Returns: + * !0: Failure. + */ + +int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem); + +/** + * ttm_bo_move_memcpy + * + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @new_mem: struct ttm_resource indicating where to move. + * + * Fallback move function for a mappable buffer object in mappable memory. + * The function will, if successful, + * free any old aperture space, and set (@new_mem)->mm_node to NULL, + * and update the (@bo)->mem placement flags. If unsuccessful, the old + * data remains untouched, and it's up to the caller to free the + * memory space indicated by @new_mem. + * Returns: + * !0: Failure. + */ + +int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem); + +/** + * ttm_bo_free_old_node + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Utility function to free an old placement after a successful move. + */ +void ttm_bo_free_old_node(struct ttm_buffer_object *bo); + +/** + * ttm_bo_move_accel_cleanup. + * + * @bo: A pointer to a struct ttm_buffer_object. + * @fence: A fence object that signals when moving is complete. + * @evict: This is an evict move. Don't return until the buffer is idle. + * @pipeline: evictions are to be pipelined. + * @new_mem: struct ttm_resource indicating where to move. + * + * Accelerated move function to be called when an accelerated move + * has been scheduled. The function will create a new temporary buffer object + * representing the old placement, and put the sync object on both buffer + * objects. After that the newly created buffer object is unref'd to be + * destroyed when the move is complete. This will help pipeline + * buffer moves. + */ +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, bool evict, + bool pipeline, + struct ttm_resource *new_mem); + +/** + * ttm_bo_pipeline_gutting. + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Pipelined gutting a BO of its backing store. + */ +int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); + +/** + * ttm_io_prot + * + * @c_state: Caching state. + * @tmp: Page protection flag for a normal, cached mapping. + * + * Utility function that returns the pgprot_t that should be used for + * setting up a PTE with the caching model indicated by @c_state. + */ +pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); + +/** + * ttm_bo_tt_bind + * + * Bind the object tt to a memory resource. + */ +int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem); + +/** + * ttm_bo_tt_bind + * + * Unbind the object tt from a memory resource. + */ +void ttm_bo_tt_unbind(struct ttm_buffer_object *bo); + +/** + * ttm_bo_tt_destroy. 
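+ *
+ * @bo: The buffer object whose TT backing store, if any, is destroyed.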
+ */ +void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); + +/** + * ttm_range_man_init + * + * @bdev: ttm device + * @type: memory manager type + * @use_tt: if the memory manager uses tt + * @p_size: size of area to be managed in pages. + * + * Initialise a generic range manager for the selected memory type. + * The range manager is installed for this device in the type slot. + */ +int ttm_range_man_init(struct ttm_bo_device *bdev, + unsigned type, bool use_tt, + unsigned long p_size); + +/** + * ttm_range_man_fini + * + * @bdev: ttm device + * @type: memory manager type + * + * Remove the generic range manager from a slot and tear it down. + */ +int ttm_range_man_fini(struct ttm_bo_device *bdev, + unsigned type); + +#endif diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h new file mode 100644 index 000000000..a99d7fdf2 --- /dev/null +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -0,0 +1,118 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#ifndef _TTM_EXECBUF_UTIL_H_ +#define _TTM_EXECBUF_UTIL_H_ + +#include <linux/list.h> + +#include "ttm_bo_api.h" + +/** + * struct ttm_validate_buffer + * + * @head: list head for thread-private list. + * @bo: refcounted buffer object pointer. + * @num_shared: How many shared fences we want to add. + */ + +struct ttm_validate_buffer { + struct list_head head; + struct ttm_buffer_object *bo; + unsigned int num_shared; +}; + +/** + * function ttm_eu_backoff_reservation + * + * @ticket: ww_acquire_ctx from reserve call + * @list: thread private list of ttm_validate_buffer structs. + * + * Undoes all buffer validation reservations for bos pointed to by + * the list entries. + */ +void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, + struct list_head *list); + +/** + * function ttm_eu_reserve_buffers + * + * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only + * non-blocking reserves should be tried. + * @list: thread private list of ttm_validate_buffer structs. + * @intr: should the wait be interruptible + * @dups: [out] optional list of duplicates. 
+ * @del_lru: true if BOs should be removed from the LRU. + * + * Tries to reserve bos pointed to by the list entries for validation. + * If the function returns 0, all buffers are marked as "unfenced", + * taken off the lru lists and are not synced for write CPU usage. + * + * If the function detects a deadlock due to multiple threads trying to + * reserve the same buffers in reverse order, all threads except one will + * back off and retry. This function may sleep while waiting for + * CPU write reservations to be cleared, and for other threads to + * unreserve their buffers. + * + * If intr is set to true, this function may return -ERESTARTSYS if the + * calling process receives a signal while waiting. In that case, no + * buffers on the list will be reserved upon return. + * + * If dups is non NULL all buffers already reserved by the current thread + * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned + * on the first already reserved buffer and all buffers from the list are + * unreserved again. + * + * Buffers reserved by this function should be unreserved by + * a call to either ttm_eu_backoff_reservation() or + * ttm_eu_fence_buffer_objects() when command submission is complete or + * has failed. + */ +int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, + struct list_head *list, bool intr, + struct list_head *dups); + +/** + * function ttm_eu_fence_buffer_objects. + * + * @ticket: ww_acquire_ctx from reserve call + * @list: thread private list of ttm_validate_buffer structs. + * @fence: The new exclusive fence for the buffers. + * + * This function should be called when command submission is complete, and + * it will add a new sync object to bos pointed to by entries on @list. + * It also unreserves all buffers, putting them on lru lists. + * + */ +void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, + struct list_head *list, + struct dma_fence *fence); + +#endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h new file mode 100644 index 000000000..c1f167881 --- /dev/null +++ b/include/drm/ttm/ttm_memory.h @@ -0,0 +1,95 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#ifndef TTM_MEMORY_H +#define TTM_MEMORY_H + +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/bug.h> +#include <linux/wait.h> +#include <linux/errno.h> +#include <linux/kobject.h> +#include <linux/mm.h> +#include "ttm_bo_api.h" + +/** + * struct ttm_mem_global - Global memory accounting structure. + * + * @shrink: A single callback to shrink TTM memory usage. Extend this + * to a linked list to be able to handle multiple callbacks when needed. + * @swap_queue: A workqueue to handle shrinking in low memory situations. We + * need a separate workqueue since it will spend a lot of time waiting + * for the GPU, and this will otherwise block other workqueue tasks(?) + * At this point we use only a single-threaded workqueue. + * @work: The workqueue callback for the shrink queue. + * @lock: Lock to protect the @shrink - and the memory accounting members, + * that is, essentially the whole structure with some exceptions. + * @lower_mem_limit: include lower limit of swap space and lower limit of + * system memory. + * @zones: Array of pointers to accounting zones. + * @num_zones: Number of populated entries in the @zones array. + * @zone_kernel: Pointer to the kernel zone. + * @zone_highmem: Pointer to the highmem zone if there is one. + * @zone_dma32: Pointer to the dma32 zone if there is one. + * + * Note that this structure is not per device. It should be global for all + * graphics devices. + */ + +#define TTM_MEM_MAX_ZONES 2 +struct ttm_mem_zone; +extern struct ttm_mem_global { + struct kobject kobj; + struct workqueue_struct *swap_queue; + struct work_struct work; + spinlock_t lock; + uint64_t lower_mem_limit; + struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; + unsigned int num_zones; + struct ttm_mem_zone *zone_kernel; +#ifdef CONFIG_HIGHMEM + struct ttm_mem_zone *zone_highmem; +#else + struct ttm_mem_zone *zone_dma32; +#endif +} ttm_mem_glob; + +int ttm_mem_global_init(struct ttm_mem_global *glob); +void ttm_mem_global_release(struct ttm_mem_global *glob); +int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + struct ttm_operation_ctx *ctx); +void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); +int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx); +void ttm_mem_global_free_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size); +size_t ttm_round_pot(size_t size); +bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, + struct ttm_operation_ctx *ctx); +#endif diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h new file mode 100644 index 000000000..45fa318c1 --- /dev/null +++ b/include/drm/ttm/ttm_module.h @@ -0,0 +1,40 @@ +/************************************************************************** + * + * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#ifndef _TTM_MODULE_H_ +#define _TTM_MODULE_H_ + +#include <linux/kernel.h> +struct kobject; + +#define TTM_PFX "[TTM] " +extern struct kobject *ttm_get_kobj(void); + +#endif /* _TTM_MODULE_H_ */ diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h new file mode 100644 index 000000000..a6b6ef5f9 --- /dev/null +++ b/include/drm/ttm/ttm_page_alloc.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) Red Hat Inc. + + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie <airlied@redhat.com> + * Jerome Glisse <jglisse@redhat.com> + */ +#ifndef TTM_PAGE_ALLOC +#define TTM_PAGE_ALLOC + +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_memory.h> + +struct device; + +/** + * Initialize pool allocator. + */ +int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); +/** + * Free pool allocator. + */ +void ttm_page_alloc_fini(void); + +/** + * ttm_pool_populate: + * + * @ttm: The struct ttm_tt to contain the backing pages. 
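+ * @ctx: TTM operation context used for the page allocation.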
+ * + * Add backing pages to all of @ttm + */ +int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); + +/** + * ttm_pool_unpopulate: + * + * @ttm: The struct ttm_tt which to free backing pages. + * + * Free all pages of @ttm + */ +void ttm_pool_unpopulate(struct ttm_tt *ttm); + +/** + * Populates and DMA maps pages to fullfil a ttm_dma_populate() request + */ +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, + struct ttm_operation_ctx *ctx); + +/** + * Unpopulates and DMA unmaps pages as part of a + * ttm_dma_unpopulate() request */ +void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); + +/** + * Output the state of pools to debugfs file + */ +int ttm_page_alloc_debugfs(struct seq_file *m, void *data); + +#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL) +/** + * Initialize pool allocator. + */ +int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); + +/** + * Free pool allocator. + */ +void ttm_dma_page_alloc_fini(void); + +/** + * Output the state of pools to debugfs file + */ +int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); + +int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, + struct ttm_operation_ctx *ctx); +void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); + +#else +static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, + unsigned max_pages) +{ + return -ENODEV; +} + +static inline void ttm_dma_page_alloc_fini(void) { return; } + +static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) +{ + return 0; +} +static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, + struct device *dev, + struct ttm_operation_ctx *ctx) +{ + return -ENOMEM; +} +static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, + struct device *dev) +{ +} +#endif + +#endif diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h new file mode 100644 index 000000000..d4022655e --- /dev/null +++ b/include/drm/ttm/ttm_placement.h @@ -0,0 +1,101 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#ifndef _TTM_PLACEMENT_H_ +#define _TTM_PLACEMENT_H_ + +#include <linux/types.h> + +/* + * Memory regions for data placement. + */ + +#define TTM_PL_SYSTEM 0 +#define TTM_PL_TT 1 +#define TTM_PL_VRAM 2 +#define TTM_PL_PRIV 3 + +/* + * Other flags that affects data placement. + * TTM_PL_FLAG_CACHED indicates cache-coherent mappings + * if available. + * TTM_PL_FLAG_SHARED means that another application may + * reference the buffer. + * TTM_PL_FLAG_NO_EVICT means that the buffer may never + * be evicted to make room for other buffers. + * TTM_PL_FLAG_TOPDOWN requests to be placed from the + * top of the memory area, instead of the bottom. + */ + +#define TTM_PL_FLAG_CACHED (1 << 16) +#define TTM_PL_FLAG_UNCACHED (1 << 17) +#define TTM_PL_FLAG_WC (1 << 18) +#define TTM_PL_FLAG_CONTIGUOUS (1 << 19) +#define TTM_PL_FLAG_NO_EVICT (1 << 21) +#define TTM_PL_FLAG_TOPDOWN (1 << 22) + +#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ + TTM_PL_FLAG_UNCACHED | \ + TTM_PL_FLAG_WC) + +/** + * struct ttm_place + * + * @fpfn: first valid page frame number to put the object + * @lpfn: last valid page frame number to put the object + * @flags: memory domain and caching flags for the object + * + * Structure indicating a possible place to put an object. + */ +struct ttm_place { + unsigned fpfn; + unsigned lpfn; + uint32_t mem_type; + uint32_t flags; +}; + +/** + * struct ttm_placement + * + * @num_placement: number of preferred placements + * @placement: preferred placements + * @num_busy_placement: number of preferred placements when need to evict buffer + * @busy_placement: preferred placements when need to evict buffer + * + * Structure indicating the placement you request for an object. + */ +struct ttm_placement { + unsigned num_placement; + const struct ttm_place *placement; + unsigned num_busy_placement; + const struct ttm_place *busy_placement; +}; + +#endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h new file mode 100644 index 000000000..0e172d94a --- /dev/null +++ b/include/drm/ttm/ttm_resource.h @@ -0,0 +1,237 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ + +#ifndef _TTM_RESOURCE_H_ +#define _TTM_RESOURCE_H_ + +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/dma-fence.h> +#include <drm/drm_print.h> + +#define TTM_MAX_BO_PRIORITY 4U + +struct ttm_bo_device; +struct ttm_resource_manager; +struct ttm_resource; +struct ttm_place; +struct ttm_buffer_object; + +struct ttm_resource_manager_func { + /** + * struct ttm_resource_manager_func member alloc + * + * @man: Pointer to a memory type manager. + * @bo: Pointer to the buffer object we're allocating space for. + * @placement: Placement details. + * @flags: Additional placement flags. + * @mem: Pointer to a struct ttm_resource to be filled in. + * + * This function should allocate space in the memory type managed + * by @man. Placement details if + * applicable are given by @placement. If successful, + * @mem::mm_node should be set to a non-null value, and + * @mem::start should be set to a value identifying the beginning + * of the range allocated, and the function should return zero. + * If the memory region accommodate the buffer object, @mem::mm_node + * should be set to NULL, and the function should return 0. + * If a system error occurred, preventing the request to be fulfilled, + * the function should return a negative error code. + * + * Note that @mem::mm_node will only be dereferenced by + * struct ttm_resource_manager functions and optionally by the driver, + * which has knowledge of the underlying type. + * + * This function may not be called from within atomic context, so + * an implementation can and must use either a mutex or a spinlock to + * protect any data structures managing the space. + */ + int (*alloc)(struct ttm_resource_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *mem); + + /** + * struct ttm_resource_manager_func member free + * + * @man: Pointer to a memory type manager. + * @mem: Pointer to a struct ttm_resource to be filled in. + * + * This function frees memory type resources previously allocated + * and that are identified by @mem::mm_node and @mem::start. May not + * be called from within atomic context. + */ + void (*free)(struct ttm_resource_manager *man, + struct ttm_resource *mem); + + /** + * struct ttm_resource_manager_func member debug + * + * @man: Pointer to a memory type manager. + * @printer: Prefix to be used in printout to identify the caller. + * + * This function is called to print out the state of the memory + * type manager to aid debugging of out-of-memory conditions. + * It may not be called from within atomic context. + */ + void (*debug)(struct ttm_resource_manager *man, + struct drm_printer *printer); +}; + +/** + * struct ttm_resource_manager + * + * @use_type: The memory type is enabled. + * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory + * managed by this memory type. + * @gpu_offset: If used, the GPU offset of the first managed page of + * fixed memory or the first managed location in an aperture. + * @size: Size of the managed region. + * @func: structure pointer implementing the range manager. See above + * @move_lock: lock for move fence + * static information. bdev::driver::io_mem_free is never used. + * @lru: The lru list for this memory type. + * @move: The fence of the last pipelined move operation. + * + * This structure is used to identify and manage memory types for a device. + */ +struct ttm_resource_manager { + /* + * No protection. Constant from start. 
+ */
+ bool use_type;
+ bool use_tt;
+ uint64_t size;
+ const struct ttm_resource_manager_func *func;
+ spinlock_t move_lock;
+
+ /*
+ * Protected by the global->lru_lock.
+ */
+
+ struct list_head lru[TTM_MAX_BO_PRIORITY];
+
+ /*
+ * Protected by @move_lock.
+ */
+ struct dma_fence *move;
+};
+
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr: mapped virtual address
+ * @offset: physical address
+ * @is_iomem: is this io memory?
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+ void *addr;
+ phys_addr_t offset;
+ bool is_iomem;
+};
+
+/**
+ * struct ttm_resource
+ *
+ * @mm_node: Memory manager node.
+ * @start: Start of the allocation within the managed region.
+ * @size: Requested size of memory region.
+ * @num_pages: Actual size of memory region in pages.
+ * @page_alignment: Page alignment.
+ * @mem_type: The TTM_PL_* memory type the resource is placed in.
+ * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+struct ttm_resource {
+ void *mm_node;
+ unsigned long start;
+ unsigned long size;
+ unsigned long num_pages;
+ uint32_t page_alignment;
+ uint32_t mem_type;
+ uint32_t placement;
+ struct ttm_bus_placement bus;
+};
+
+/**
+ * ttm_resource_manager_set_used
+ *
+ * @man: A memory manager object.
+ * @used: usage state to set.
+ *
+ * Set the manager in use flag. If disabled the manager is no longer
+ * used for object placement.
+ */
+static inline void
+ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
+{
+ man->use_type = used;
+}
+
+/**
+ * ttm_resource_manager_used
+ *
+ * @man: Manager to get used state for
+ *
+ * Get the in use flag for a manager.
+ * Returns:
+ * true if used, false if not.
+ */
+static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
+{
+ return man->use_type;
+}
+
+/**
+ * ttm_resource_manager_cleanup
+ *
+ * @man: A memory manager object.
+ *
+ * Cleanup the move fences from the memory manager object.
+ */
+static inline void
+ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
+{
+ dma_fence_put(man->move);
+ man->move = NULL;
+}
+
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
+
+void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ unsigned long p_size);
+
+int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
+ struct ttm_resource_manager *man);
+
+void ttm_resource_manager_debug(struct ttm_resource_manager *man,
+ struct drm_printer *p);
+
+#endif
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
new file mode 100644
index 000000000..7c492b49e
--- /dev/null
+++ b/include/drm/ttm/ttm_set_memory.h
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Huang Rui <ray.huang@amd.com> + */ + +#ifndef TTM_SET_MEMORY +#define TTM_SET_MEMORY + +#include <linux/mm.h> + +#ifdef CONFIG_X86 + +#include <asm/set_memory.h> + +static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray) +{ + return set_pages_array_wb(pages, addrinarray); +} + +static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray) +{ + return set_pages_array_wc(pages, addrinarray); +} + +static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray) +{ + return set_pages_array_uc(pages, addrinarray); +} + +static inline int ttm_set_pages_wb(struct page *page, int numpages) +{ + return set_pages_wb(page, numpages); +} + +static inline int ttm_set_pages_wc(struct page *page, int numpages) +{ + unsigned long addr = (unsigned long)page_address(page); + + return set_memory_wc(addr, numpages); +} + +static inline int ttm_set_pages_uc(struct page *page, int numpages) +{ + return set_pages_uc(page, numpages); +} + +#else /* for CONFIG_X86 */ + +#if IS_ENABLED(CONFIG_AGP) + +#include <asm/agp.h> + +static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray) +{ + int i; + + for (i = 0; i < addrinarray; i++) + unmap_page_from_agp(pages[i]); + return 0; +} + +static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray) +{ + int i; + + for (i = 0; i < addrinarray; i++) + map_page_into_agp(pages[i]); + return 0; +} + +static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray) +{ + int i; + + for (i = 0; i < addrinarray; i++) + map_page_into_agp(pages[i]); + return 0; +} + +static inline int ttm_set_pages_wb(struct page *page, int numpages) +{ + int i; + + for (i = 0; i < numpages; i++) + unmap_page_from_agp(page++); + return 0; +} + +#else /* for CONFIG_AGP */ + +static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray) +{ + return 0; +} + +static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray) +{ + return 0; +} + +static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray) +{ + return 0; +} + +static inline int ttm_set_pages_wb(struct page *page, int numpages) +{ + return 0; +} + +#endif /* for CONFIG_AGP */ + +static inline int ttm_set_pages_wc(struct page *page, int numpages) +{ + return 0; 
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+ return 0;
+}
+
+#endif /* for CONFIG_X86 */
+
+#endif
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
new file mode 100644
index 000000000..75208c0a0
--- /dev/null
+++ b/include/drm/ttm/ttm_tt.h
@@ -0,0 +1,232 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _TTM_TT_H_
+#define _TTM_TT_H_
+
+#include <linux/types.h>
+
+struct ttm_tt;
+struct ttm_resource;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+#define TTM_PAGE_FLAG_WRITE (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
+#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
+#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
+
+#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)
+
+enum ttm_caching_state {
+ tt_uncached,
+ tt_wc,
+ tt_cached
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @pages: Array of pages backing the data.
+ * @page_flags: TTM_PAGE_FLAG_XX flags describing the state of the pages.
+ * @num_pages: Number of pages in the page array.
+ * @sg: Scatter/gather table for SG objects imported via dma-buf.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ *
+ * This is a structure holding the pages and caching state for a buffer
+ * object that isn't backed by fixed (VRAM / AGP) memory.
+ */
+struct ttm_tt {
+ struct page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
+ struct file *swap_storage;
+ enum ttm_caching_state caching_state;
+};
+
+static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
+{
+ return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
+}
+
+static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt)
+{
+ tt->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
+}
+
+static inline void ttm_tt_set_populated(struct ttm_tt *tt)
+{
+ tt->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
+}
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
+ dma_addr_t *dma_address;
+ struct list_head pages_list;
+};
+
+/**
+ * ttm_tt_create
+ *
+ * @bo: pointer to a struct ttm_buffer_object
+ * @zero_alloc: true if allocated pages need to be zeroed
+ *
+ * Make sure we have a TTM structure allocated for the given BO.
+ * No pages are actually allocated.
+ */
+int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bo: The buffer object we create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * 0 on success, a negative error code on failure.
+ */
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free the memory of a ttm_tt structure.
+ */
+void ttm_tt_fini(struct ttm_tt *ttm);
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_destroy_common:
+ *
+ * Called from the driver to destroy the common part of a struct ttm_tt.
+ */
+void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
+ */
+int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt whose backing pages will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change the caching policy of any default kernel
+ * mappings of the pages backing @ttm. If changing from cached to uncached
+ * or write-combined, all CPU caches will first be flushed to make sure the
+ * data of the pages hits RAM. This function may be very costly as it
+ * involves global TLB and cache flushes and potential page splitting /
+ * combining.
+ */
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct file *persistent_swap_storage);
+
+/**
+ * ttm_tt_populate - allocate pages for a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to allocate pages for a ttm
+ */
+int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+
+#if IS_ENABLED(CONFIG_AGP)
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bo: Buffer object we allocate the ttm for.
+ * @bridge: The agp bridge this device is sitting on.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
+ struct agp_bridge_data *bridge,
+ uint32_t page_flags);
+int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem);
+void ttm_agp_unbind(struct ttm_tt *ttm);
+void ttm_agp_destroy(struct ttm_tt *ttm);
+bool ttm_agp_is_bound(struct ttm_tt *ttm);
+#endif
+
+#endif
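
The struct ttm_place / struct ttm_placement pair from ttm_placement.h above is what a driver fills in to tell TTM where a buffer object may live. The fragment below is a minimal sketch of such a setup, assuming a driver that prefers write-combined VRAM and falls back to cached TT memory under pressure; the example_* names and the specific flag choices are illustrative assumptions, not part of these headers.

#include <drm/ttm/ttm_placement.h>

/* Preferred placement: VRAM with write-combined CPU mappings.
 * fpfn/lpfn of 0 are commonly used to mean "no range restriction". */
static const struct ttm_place example_vram_place = {
	.fpfn     = 0,
	.lpfn     = 0,
	.mem_type = TTM_PL_VRAM,
	.flags    = TTM_PL_FLAG_WC,
};

/* Fallback placement used when VRAM is under pressure: cached TT pages. */
static const struct ttm_place example_tt_place = {
	.fpfn     = 0,
	.lpfn     = 0,
	.mem_type = TTM_PL_TT,
	.flags    = TTM_PL_FLAG_CACHED,
};

static const struct ttm_placement example_placement = {
	.num_placement      = 1,
	.placement          = &example_vram_place,
	.num_busy_placement = 1,
	.busy_placement     = &example_tt_place,	/* used when eviction is needed */
};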
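
The resource-manager helpers declared in ttm_resource.h above follow a simple lifecycle: initialise the manager, hook in a struct ttm_resource_manager_func, enable it with ttm_resource_manager_set_used(), and on teardown disable it, flush the LRU lists and drop the move fence. The sketch below illustrates that ordering; the example_mm_* names and the driver-provided callback table are assumptions for illustration, not code from this patch.

#include <drm/ttm/ttm_resource.h>

/* Hypothetical driver-provided alloc/free/debug callbacks (not in the header). */
extern const struct ttm_resource_manager_func example_mm_func;

static void example_mm_init(struct ttm_resource_manager *man,
			    unsigned long p_size)
{
	ttm_resource_manager_init(man, p_size);		/* size, LRU lists, ... */
	man->func = &example_mm_func;			/* hook in alloc/free/debug */
	man->use_tt = false;				/* fixed (VRAM-like) memory */
	ttm_resource_manager_set_used(man, true);	/* enable for placement */
}

static int example_mm_fini(struct ttm_bo_device *bdev,
			   struct ttm_resource_manager *man)
{
	int ret;

	/* Stop placing new objects here, then evict whatever is left. */
	ttm_resource_manager_set_used(man, false);
	ret = ttm_resource_manager_force_list_clean(bdev, man);
	if (ret)
		return ret;

	/* Drop the pipelined-move fence before tearing the manager down. */
	ttm_resource_manager_cleanup(man);
	return 0;
}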
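
ttm_tt_init(), ttm_tt_fini() and ttm_tt_destroy_common() from ttm_tt.h above are normally wrapped by a driver's own TT backend object. The fragment below is a rough sketch of how such a wrapper might look, assuming a hypothetical example_ttm type; driver-specific populate/unpopulate handling is omitted.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/ttm/ttm_tt.h>

/* Hypothetical per-driver TT object wrapping the common struct ttm_tt. */
struct example_ttm {
	struct ttm_tt base;
	/* driver-private state would follow here */
};

static struct ttm_tt *example_ttm_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct example_ttm *ett;

	ett = kzalloc(sizeof(*ett), GFP_KERNEL);
	if (!ett)
		return NULL;

	/* Sets up the page array bookkeeping; no pages are allocated yet. */
	if (ttm_tt_init(&ett->base, bo, page_flags)) {
		kfree(ett);
		return NULL;
	}
	return &ett->base;
}

static void example_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct example_ttm *ett = container_of(ttm, struct example_ttm, base);

	ttm_tt_destroy_common(bdev, ttm);	/* common teardown (swap storage, ...) */
	ttm_tt_fini(ttm);			/* free the page array */
	kfree(ett);
}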