| author | Rob Clark <[email protected]> | 2023-12-10 10:07:54 -0800 |
|---|---|---|
| committer | Rob Clark <[email protected]> | 2023-12-10 10:07:54 -0800 |
| commit | cbaf84e73811ed0ff7ff6d7f52b73fd7ed082d65 | |
| tree | 307e1e7bd42f3b451182f3a548f2169aef3c6ddf /include | |
| parent | a08935fc859b22884dcb6b5126d3a986467101ce | |
| parent | fca9448ae2f5ddebd841c727ee86136e1b5cbd86 | |
Merge remote-tracking branch 'drm-misc/drm-misc-next' into msm-next
Backmerge drm-misc-next to pick up some dependencies for drm/msm
patches, in particular:
https://patchwork.freedesktop.org/patch/570219/?series=127251&rev=1
https://patchwork.freedesktop.org/series/123411/
Signed-off-by: Rob Clark <[email protected]>
Diffstat (limited to 'include')
65 files changed, 2876 insertions, 804 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index afeed6e72049..1216d72c650f 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -542,6 +542,7 @@ int acpi_device_set_power(struct acpi_device *device, int state); int acpi_bus_init_power(struct acpi_device *device); int acpi_device_fix_up_power(struct acpi_device *device); void acpi_device_fix_up_power_extended(struct acpi_device *adev); +void acpi_device_fix_up_power_children(struct acpi_device *adev); int acpi_bus_update_power(acpi_handle handle, int *state_p); int acpi_device_update_power(struct acpi_device *device, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 995513fa2690..0655aa5b57b2 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -70,7 +70,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock) */ static __always_inline int queued_spin_value_unlocked(struct qspinlock lock) { - return !atomic_read(&lock.val); + return !lock.val.counter; } /** diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h new file mode 100644 index 000000000000..c4c423e97f06 --- /dev/null +++ b/include/drm/bridge/aux-bridge.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2023 Linaro Ltd. + * + * Author: Dmitry Baryshkov <[email protected]> + */ +#ifndef DRM_AUX_BRIDGE_H +#define DRM_AUX_BRIDGE_H + +#include <drm/drm_connector.h> + +#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE) +int drm_aux_bridge_register(struct device *parent); +#else +static inline int drm_aux_bridge_register(struct device *parent) +{ + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE) +struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np); +void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status); +#else +static inline struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np) +{ + return NULL; +} + +static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status) +{ +} +#endif + +#endif diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index e69cece404b3..3731828825bd 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -148,6 +148,7 @@ #define DP_RECEIVE_PORT_0_CAP_0 0x008 # define DP_LOCAL_EDID_PRESENT (1 << 1) # define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) +# define DP_HBLANK_EXPANSION_CAPABLE (1 << 3) #define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 @@ -543,6 +544,10 @@ /* DFP Capability Extension */ #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */ +#define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */ +# define DP_PANEL_REPLAY_SUPPORT (1 << 0) +# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1) + /* Link Configuration */ #define DP_LINK_BW_SET 0x100 # define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ @@ -646,6 +651,9 @@ # define DP_LINK_QUAL_PATTERN_PRSBS31 0x38 # define DP_LINK_QUAL_PATTERN_CUSTOM 0x40 # define DP_LINK_QUAL_PATTERN_SQUARE 0x48 +# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49 +# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a +# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b #define DP_TRAINING_LANE0_1_SET2 0x10f #define DP_TRAINING_LANE2_3_SET2 0x110 @@ -699,6 +707,7 @@ #define DP_DSC_ENABLE 0x160 /* DP 1.4 */ # define DP_DECOMPRESSION_EN (1 << 0) +# define DP_DSC_PASSTHROUGH_EN (1 << 1) 
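The drm_dp.h hunk above adds the Panel Replay DPCD registers. As a hedged sketch of probing them (the helper name here is hypothetical; drm_dp_dpcd_readb() is the existing DPCD accessor):

```c
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>

/* Hypothetical probe of the new DP_PANEL_REPLAY_CAP register. */
static bool sink_supports_panel_replay(struct drm_dp_aux *aux)
{
	u8 pr_cap;

	if (drm_dp_dpcd_readb(aux, DP_PANEL_REPLAY_CAP, &pr_cap) != 1)
		return false;

	return pr_cap & DP_PANEL_REPLAY_SUPPORT;
}
```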
#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */ #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ @@ -716,6 +725,13 @@ #define DP_BRANCH_DEVICE_CTRL 0x1a1 # define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) +#define PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */ +# define DP_PANEL_REPLAY_ENABLE (1 << 0) +# define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3) +# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4) +# define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5) +# define DP_PANEL_REPLAY_SU_ENABLE (1 << 6) + #define DP_PAYLOAD_ALLOCATE_SET 0x1c0 #define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 #define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 @@ -1105,6 +1121,18 @@ #define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ #define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ +#define DP_PANEL_REPLAY_ERROR_STATUS 0x2020 /* DP 2.1*/ +# define DP_PANEL_REPLAY_LINK_CRC_ERROR (1 << 0) +# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR (1 << 1) +# define DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) + +#define DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS 0x2022 /* DP 2.1 */ +# define DP_SINK_DEVICE_PANEL_REPLAY_STATUS_MASK (7 << 0) +# define DP_SINK_FRAME_LOCKED_SHIFT 3 +# define DP_SINK_FRAME_LOCKED_MASK (3 << 3) +# define DP_SINK_FRAME_LOCKED_STATUS_VALID_SHIFT 5 +# define DP_SINK_FRAME_LOCKED_STATUS_VALID_MASK (1 << 5) + /* Extended Receiver Capability: See DP_DPCD_REV for definitions */ #define DP_DP13_DPCD_REV 0x2200 diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 3d74b2cec72f..863b2e7add29 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -164,6 +164,7 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) } /* DP/eDP DSC support */ +u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], bool is_edp); u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); @@ -251,6 +252,19 @@ drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); } +/** + * drm_dp_is_uhbr_rate - Determine if a link rate is UHBR + * @link_rate: link rate in 10kbits/s units + * + * Determine if the provided link rate is an UHBR rate. + * + * Returns: %True if @link_rate is an UHBR rate. + */ +static inline bool drm_dp_is_uhbr_rate(int link_rate) +{ + return link_rate >= 1000000; +} + /* * DisplayPort AUX channel */ @@ -632,6 +646,13 @@ enum drm_dp_quirk { * the DP_MAX_LINK_RATE register reporting a lower max multiplier. */ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, + /** + * @DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC: + * + * The device applies HBLANK expansion for some modes, but this + * requires enabling DSC. 
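drm_dp_is_uhbr_rate() above keys off the 10 kbit/s link-rate units used throughout the DP helpers. A minimal illustration (function name hypothetical; UHBR rates are the ones carried over 128b/132b channel coding):

```c
#include <drm/display/drm_dp_helper.h>

/* Link rates in the DP helpers are in 10 kbit/s units, so UHBR starts at
 * UHBR10 = 10 Gbit/s = 1000000; HBR3 (810000) is still 8b/10b.
 */
static bool example_needs_128b132b(int link_rate)
{
	return drm_dp_is_uhbr_rate(link_rate); /* true for 1000000 and above */
}
```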
+ */ + DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC, }; /** @@ -781,4 +802,15 @@ bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZ const u8 port_cap[4], u8 color_spc); int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); +#define DRM_DP_BW_OVERHEAD_MST BIT(0) +#define DRM_DP_BW_OVERHEAD_UHBR BIT(1) +#define DRM_DP_BW_OVERHEAD_SSC_REF_CLK BIT(2) +#define DRM_DP_BW_OVERHEAD_FEC BIT(3) +#define DRM_DP_BW_OVERHEAD_DSC BIT(4) + +int drm_dp_bw_overhead(int lane_count, int hactive, + int dsc_slice_count, + int bpp_x16, unsigned long flags); +int drm_dp_bw_channel_coding_efficiency(bool is_uhbr); + #endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 4429d3b1745b..9b19d8bd520a 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -25,6 +25,7 @@ #include <linux/types.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic.h> +#include <drm/drm_fixed.h> #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) #include <linux/stackdepot.h> @@ -617,7 +618,7 @@ struct drm_dp_mst_topology_state { * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this * out itself. */ - int pbn_div; + fixed20_12 pbn_div; }; #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) @@ -839,10 +840,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); -int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, - int link_rate, int link_lane_count); +fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count); -int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); +int drm_dp_calc_pbn_mode(int clock, int bpp); void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap); @@ -892,6 +893,9 @@ drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_atomic_payload * drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state, struct drm_dp_mst_port *port); +bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_port *parent); int __must_check drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, @@ -913,6 +917,10 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, struct drm_dp_query_stream_enc_status_ack_reply *status); +int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_port **failing_port); int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state, struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 536a0b0091c3..fea528aacfe2 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -49,9 +49,8 @@ struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); -int 
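The MST changes above turn pbn_div into a fixed20_12 value and drop the dsc argument from drm_dp_calc_pbn_mode(). A hedged sketch of converting a mode to time slots under the new types, assuming (per the fractional-bpp rework this merge pulls in) that bpp is now passed in .4 binary fixed point, and that the slot count follows the scale-then-divide pattern:

```c
#include <linux/math.h>
#include <drm/drm_fixed.h>
#include <drm/display/drm_dp_mst_helper.h>

/* Sketch only: 'mgr_state' is assumed valid, with pbn_div filled in by the
 * driver. bpp_x16 is bits per pixel in .4 binary fixed point (bpp * 16).
 */
static int example_required_slots(const struct drm_dp_mst_topology_state *mgr_state,
				  int pixel_clock_khz, int bpp_x16)
{
	int pbn = drm_dp_calc_pbn_mode(pixel_clock_khz, bpp_x16);

	/* pbn_div is now fixed20_12; scale pbn up before the division. */
	return DIV_ROUND_UP(dfixed_const(pbn), mgr_state->pbn_div.full);
}
```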
-drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder, - struct drm_connector_state *conn_state); +int drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector, + struct drm_atomic_state *state); int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, const struct drm_crtc_state *crtc_state, int min_scale, diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index ba248ca8866f..50131383ed81 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -33,24 +33,6 @@ #include <linux/wait.h> struct drm_file; -struct drm_hw_lock; - -/* - * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for - * include ordering reasons. - * - * DO NOT USE. - */ -struct drm_lock_data { - struct drm_hw_lock *hw_lock; - struct drm_file *file_priv; - wait_queue_head_t lock_queue; - unsigned long lock_time; - spinlock_t spinlock; - uint32_t kernel_waiters; - uint32_t user_waiters; - int idle_has_lock; -}; /** * struct drm_master - drm master structure @@ -145,10 +127,6 @@ struct drm_master { * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex. */ struct idr lessee_idr; - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - struct drm_lock_data lock; -#endif }; struct drm_master *drm_master_get(struct drm_master *master); diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index cfb7dcdb66c4..e39da5807ba7 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -194,7 +194,7 @@ struct drm_bridge_funcs { * or &drm_encoder_helper_funcs.dpms hook. * * The bridge must assume that the display pipe (i.e. clocks and timing - * singals) feeding it is no longer running when this callback is + * signals) feeding it is no longer running when this callback is * called. * * The @post_disable callback is optional. @@ -950,6 +950,4 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif -void drm_bridge_debugfs_init(struct drm_device *dev); - #endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index 81c298488b0c..54b2b2467bfd 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -36,20 +36,17 @@ struct drm_plane; * * Extract a degamma/gamma LUT value provided by user (in the form of * &drm_color_lut entries) and round it to the precision supported by the - * hardware. + * hardware, following OpenGL int<->float conversion rules + * (see eg. OpenGL 4.6 specification - 2.3.5 Fixed-Point Data Conversions). */ static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision) { - u32 val = user_input; - u32 max = 0xffff >> (16 - bit_precision); - - /* Round only if we're not using full precision. 
*/ - if (bit_precision < 16) { - val += 1UL << (16 - bit_precision - 1); - val >>= 16 - bit_precision; - } - - return clamp_val(val, 0, max); + if (bit_precision > 16) + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(user_input, (1 << bit_precision) - 1), + (1 << 16) - 1); + else + return DIV_ROUND_CLOSEST(user_input * ((1 << bit_precision) - 1), + (1 << 16) - 1); } u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n); diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index c490977ee250..63767cf24371 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -6,7 +6,6 @@ #include <linux/mutex.h> #include <linux/idr.h> -#include <drm/drm_legacy.h> #include <drm/drm_mode_config.h> struct drm_driver; @@ -153,8 +152,8 @@ struct drm_device { * * Lock for others (not &drm_minor.master and &drm_file.is_master) * - * WARNING: - * Only drivers annotated with DRIVER_LEGACY should be using this. + * TODO: This lock used to be the BKL of the DRM subsystem. Move the + * lock into i915, which is the only remaining user. */ struct mutex struct_mutex; @@ -317,72 +316,6 @@ struct drm_device { * Root directory for debugfs files. */ struct dentry *debugfs_root; - - /* Everything below here is for legacy driver, never use! */ - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - /* List of devices per driver for stealth attach cleanup */ - struct list_head legacy_dev_list; - -#ifdef __alpha__ - /** @hose: PCI hose, only used on ALPHA platforms. */ - struct pci_controller *hose; -#endif - - /* AGP data */ - struct drm_agp_head *agp; - - /* Context handle management - linked list of context handles */ - struct list_head ctxlist; - - /* Context handle management - mutex for &ctxlist */ - struct mutex ctxlist_mutex; - - /* Context handle management */ - struct idr ctx_idr; - - /* Memory management - linked list of regions */ - struct list_head maplist; - - /* Memory management - user token hash table for maps */ - struct drm_open_hash map_hash; - - /* Context handle management - list of vmas (for debugging) */ - struct list_head vmalist; - - /* Optional pointer for DMA support */ - struct drm_device_dma *dma; - - /* Context swapping flag */ - __volatile__ long context_flag; - - /* Last current context */ - int last_context; - - /* Lock for &buf_use and a few other things. */ - spinlock_t buf_lock; - - /* Usage counter for buffers in use -- cannot alloc */ - int buf_use; - - /* Buffer allocation in progress */ - atomic_t buf_alloc; - - struct { - int context; - struct drm_hw_lock *lock; - } sigdata; - - struct drm_local_map *agp_buffer_map; - unsigned int agp_buffer_token; - - /* Scatter gather memory */ - struct drm_sg_mem *sg; - - /* IRQs */ - bool irq_enabled; - int irq; -#endif }; #endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index e2640dc64e08..8878260d7529 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -110,6 +110,15 @@ enum drm_driver_feature { * Driver supports user defined GPU VA bindings for GEM objects. */ DRIVER_GEM_GPUVA = BIT(8), + /** + * @DRIVER_CURSOR_HOTSPOT: + * + * Driver supports and requires cursor hotspot information in the + * cursor plane (e.g. cursor plane has to actually track the mouse + * cursor and the clients are required to set hotspot in order for + * the cursor planes to work correctly). + */ + DRIVER_CURSOR_HOTSPOT = BIT(9), /* IMPORTANT: Below are all the legacy flags, add new ones above. */ @@ -433,25 +442,6 @@ struct drm_driver { * some examples. 
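Worked values for the drm_color_lut_extract() rewrite above, computed from the new formula; the last case shows where it departs from the old shift-and-round code:

```c
#include <drm/drm_color_mgmt.h>

static void example_lut_rounding(void)
{
	/* out = DIV_ROUND_CLOSEST(in * ((1 << bpc) - 1), 0xffff) for bpc <= 16 */
	u32 a = drm_color_lut_extract(0xffff, 10); /* 0x3ff: full scale -> full scale */
	u32 b = drm_color_lut_extract(0x8000, 8);  /* 0x80: 127.500 rounds up to 128 */
	u32 c = drm_color_lut_extract(0x0080, 8);  /* 0x00: 0.498 rounds down; the old
						    * shift-and-round code returned 1 */
	(void)a; (void)b; (void)c;
}
```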
*/ const struct file_operations *fops; - -#ifdef CONFIG_DRM_LEGACY - /* Everything below here is for legacy driver, never use! */ - /* private: */ - - int (*firstopen) (struct drm_device *); - void (*preclose) (struct drm_device *, struct drm_file *file_priv); - int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); - int (*dma_quiescent) (struct drm_device *); - int (*context_dtor) (struct drm_device *dev, int context); - irqreturn_t (*irq_handler)(int irq, void *arg); - void (*irq_preinstall)(struct drm_device *dev); - int (*irq_postinstall)(struct drm_device *dev); - void (*irq_uninstall)(struct drm_device *dev); - u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); - int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); - void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); - int dev_priv_size; -#endif }; void *__devm_drm_dev_alloc(struct device *parent, diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 882d2638708e..518d1b8106c7 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -269,64 +269,6 @@ struct detailed_timing { #define DRM_EDID_DSC_MAX_SLICES 0xf #define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f -/* ELD Header Block */ -#define DRM_ELD_HEADER_BLOCK_SIZE 4 - -#define DRM_ELD_VER 0 -# define DRM_ELD_VER_SHIFT 3 -# define DRM_ELD_VER_MASK (0x1f << 3) -# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ -# define DRM_ELD_VER_CANNED (0x1f << 3) - -#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ - -/* ELD Baseline Block for ELD_Ver == 2 */ -#define DRM_ELD_CEA_EDID_VER_MNL 4 -# define DRM_ELD_CEA_EDID_VER_SHIFT 5 -# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) -# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) -# define DRM_ELD_MNL_SHIFT 0 -# define DRM_ELD_MNL_MASK (0x1f << 0) - -#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 -# define DRM_ELD_SAD_COUNT_SHIFT 4 -# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) -# define DRM_ELD_CONN_TYPE_SHIFT 2 -# define DRM_ELD_CONN_TYPE_MASK (3 << 2) -# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) -# define DRM_ELD_CONN_TYPE_DP (1 << 2) -# define DRM_ELD_SUPPORTS_AI (1 << 1) -# define DRM_ELD_SUPPORTS_HDCP (1 << 0) - -#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ -# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ - -#define DRM_ELD_SPEAKER 7 -# define DRM_ELD_SPEAKER_MASK 0x7f -# define DRM_ELD_SPEAKER_RLRC (1 << 6) -# define DRM_ELD_SPEAKER_FLRC (1 << 5) -# define DRM_ELD_SPEAKER_RC (1 << 4) -# define DRM_ELD_SPEAKER_RLR (1 << 3) -# define DRM_ELD_SPEAKER_FC (1 << 2) -# define DRM_ELD_SPEAKER_LFE (1 << 1) -# define DRM_ELD_SPEAKER_FLR (1 << 0) - -#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ -# define DRM_ELD_PORT_ID_LEN 8 - -#define DRM_ELD_MANUFACTURER_NAME0 16 -#define DRM_ELD_MANUFACTURER_NAME1 17 - -#define DRM_ELD_PRODUCT_CODE0 18 -#define DRM_ELD_PRODUCT_CODE1 19 - -#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ - -#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) - struct edid { u8 header[8]; /* Vendor & product info */ @@ -387,11 +329,6 @@ int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, const struct drm_display_mode *mode); -#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE -int __drm_set_edid_firmware_path(const char *path); -int __drm_get_edid_firmware_path(char *buf, 
size_t bufsize); -#endif - bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2); int @@ -410,96 +347,6 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, enum hdmi_quantization_range rgb_quant_range); /** - * drm_eld_mnl - Get ELD monitor name length in bytes. - * @eld: pointer to an eld memory structure with mnl set - */ -static inline int drm_eld_mnl(const uint8_t *eld) -{ - return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; -} - -/** - * drm_eld_sad - Get ELD SAD structures. - * @eld: pointer to an eld memory structure with sad_count set - */ -static inline const uint8_t *drm_eld_sad(const uint8_t *eld) -{ - unsigned int ver, mnl; - - ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; - if (ver != 2 && ver != 31) - return NULL; - - mnl = drm_eld_mnl(eld); - if (mnl > 16) - return NULL; - - return eld + DRM_ELD_CEA_SAD(mnl, 0); -} - -/** - * drm_eld_sad_count - Get ELD SAD count. - * @eld: pointer to an eld memory structure with sad_count set - */ -static inline int drm_eld_sad_count(const uint8_t *eld) -{ - return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> - DRM_ELD_SAD_COUNT_SHIFT; -} - -/** - * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes - * @eld: pointer to an eld memory structure with mnl and sad_count set - * - * This is a helper for determining the payload size of the baseline block, in - * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. - */ -static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) -{ - return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + - drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; -} - -/** - * drm_eld_size - Get ELD size in bytes - * @eld: pointer to a complete eld memory structure - * - * The returned value does not include the vendor block. It's vendor specific, - * and comprises of the remaining bytes in the ELD memory buffer after - * drm_eld_size() bytes of header and baseline block. - * - * The returned value is guaranteed to be a multiple of 4. - */ -static inline int drm_eld_size(const uint8_t *eld) -{ - return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; -} - -/** - * drm_eld_get_spk_alloc - Get speaker allocation - * @eld: pointer to an ELD memory structure - * - * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER - * field definitions to identify speakers. - */ -static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) -{ - return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; -} - -/** - * drm_eld_get_conn_type - Get device type hdmi/dp connected - * @eld: pointer to an ELD memory structure - * - * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to - * identify the display type connected. 
- */ -static inline u8 drm_eld_get_conn_type(const uint8_t *eld) -{ - return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; -} - -/** * drm_edid_decode_mfg_id - Decode the manufacturer ID * @mfg_id: The manufacturer ID * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0' diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h new file mode 100644 index 000000000000..0a88d10b28b0 --- /dev/null +++ b/include/drm/drm_eld.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __DRM_ELD_H__ +#define __DRM_ELD_H__ + +#include <linux/types.h> + +struct cea_sad; + +/* ELD Header Block */ +#define DRM_ELD_HEADER_BLOCK_SIZE 4 + +#define DRM_ELD_VER 0 +# define DRM_ELD_VER_SHIFT 3 +# define DRM_ELD_VER_MASK (0x1f << 3) +# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ +# define DRM_ELD_VER_CANNED (0x1f << 3) + +#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ + +/* ELD Baseline Block for ELD_Ver == 2 */ +#define DRM_ELD_CEA_EDID_VER_MNL 4 +# define DRM_ELD_CEA_EDID_VER_SHIFT 5 +# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) +# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) +# define DRM_ELD_MNL_SHIFT 0 +# define DRM_ELD_MNL_MASK (0x1f << 0) + +#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 +# define DRM_ELD_SAD_COUNT_SHIFT 4 +# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) +# define DRM_ELD_CONN_TYPE_SHIFT 2 +# define DRM_ELD_CONN_TYPE_MASK (3 << 2) +# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) +# define DRM_ELD_CONN_TYPE_DP (1 << 2) +# define DRM_ELD_SUPPORTS_AI (1 << 1) +# define DRM_ELD_SUPPORTS_HDCP (1 << 0) + +#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ +# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ + +#define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f +# define DRM_ELD_SPEAKER_RLRC (1 << 6) +# define DRM_ELD_SPEAKER_FLRC (1 << 5) +# define DRM_ELD_SPEAKER_RC (1 << 4) +# define DRM_ELD_SPEAKER_RLR (1 << 3) +# define DRM_ELD_SPEAKER_FC (1 << 2) +# define DRM_ELD_SPEAKER_LFE (1 << 1) +# define DRM_ELD_SPEAKER_FLR (1 << 0) + +#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ +# define DRM_ELD_PORT_ID_LEN 8 + +#define DRM_ELD_MANUFACTURER_NAME0 16 +#define DRM_ELD_MANUFACTURER_NAME1 17 + +#define DRM_ELD_PRODUCT_CODE0 18 +#define DRM_ELD_PRODUCT_CODE1 19 + +#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ + +#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) + +/** + * drm_eld_mnl - Get ELD monitor name length in bytes. + * @eld: pointer to an eld memory structure with mnl set + */ +static inline int drm_eld_mnl(const u8 *eld) +{ + return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; +} + +int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad); +int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad); + +/** + * drm_eld_sad - Get ELD SAD structures. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline const u8 *drm_eld_sad(const u8 *eld) +{ + unsigned int ver, mnl; + + ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; + if (ver != 2 && ver != 31) + return NULL; + + mnl = drm_eld_mnl(eld); + if (mnl > 16) + return NULL; + + return eld + DRM_ELD_CEA_SAD(mnl, 0); +} + +/** + * drm_eld_sad_count - Get ELD SAD count. 
+ * @eld: pointer to an eld memory structure with sad_count set + */ +static inline int drm_eld_sad_count(const u8 *eld) +{ + return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> + DRM_ELD_SAD_COUNT_SHIFT; +} + +/** + * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes + * @eld: pointer to an eld memory structure with mnl and sad_count set + * + * This is a helper for determining the payload size of the baseline block, in + * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. + */ +static inline int drm_eld_calc_baseline_block_size(const u8 *eld) +{ + return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; +} + +/** + * drm_eld_size - Get ELD size in bytes + * @eld: pointer to a complete eld memory structure + * + * The returned value does not include the vendor block. It's vendor specific, + * and comprises of the remaining bytes in the ELD memory buffer after + * drm_eld_size() bytes of header and baseline block. + * + * The returned value is guaranteed to be a multiple of 4. + */ +static inline int drm_eld_size(const u8 *eld) +{ + return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; +} + +/** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. + */ +static inline u8 drm_eld_get_spk_alloc(const u8 *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** + * drm_eld_get_conn_type - Get device type hdmi/dp connected + * @eld: pointer to an ELD memory structure + * + * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to + * identify the display type connected. + */ +static inline u8 drm_eld_get_conn_type(const u8 *eld) +{ + return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; +} + +#endif /* __DRM_ELD_H__ */ diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 3a09682af685..977a9381c8ba 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -60,7 +60,7 @@ struct drm_encoder_funcs { * @late_register: * * This optional hook can be used to register additional userspace - * interfaces attached to the encoder like debugfs interfaces. + * interfaces attached to the encoder. * It is called late in the driver load sequence from drm_dev_register(). * Everything added from this callback should be unregistered in * the early_unregister callback. @@ -81,6 +81,13 @@ struct drm_encoder_funcs { * before data structures are torndown. */ void (*early_unregister)(struct drm_encoder *encoder); + + /** + * @debugfs_init: + * + * Allows encoders to create encoder-specific debugfs files. + */ + void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root); }; /** @@ -184,6 +191,13 @@ struct drm_encoder { const struct drm_encoder_funcs *funcs; const struct drm_encoder_helper_funcs *helper_private; + + /** + * @debugfs_entry: + * + * Debugfs directory for this CRTC. 
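The ELD helpers now live in the new drm/drm_eld.h rather than drm_edid.h. A hedged consumer sketch, assuming eld points at a complete ELD buffer such as drm_connector.eld:

```c
#include <drm/drm_eld.h>

static bool example_eld_is_dp(const u8 *eld, int *size, u8 *speakers)
{
	*size = drm_eld_size(eld);              /* header + baseline block, in bytes */
	*speakers = drm_eld_get_spk_alloc(eld); /* DRM_ELD_SPEAKER_* mask */

	return drm_eld_get_conn_type(eld) == DRM_ELD_CONN_TYPE_DP;
}
```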
+ */ + struct dentry *debugfs_entry; }; #define obj_to_encoder(x) container_of(x, struct drm_encoder, base) diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index e1b5b4282f75..ab230d3af138 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -227,6 +227,18 @@ struct drm_file { bool is_master; /** + * @supports_virtualized_cursor_plane: + * + * This client is capable of handling the cursor plane with the + * restrictions imposed on it by the virtualized drivers. + * + * This implies that the cursor plane has to behave like a cursor + * i.e. track cursor movement. It also requires setting of the + * hotspot properties by the client on the cursor plane. + */ + bool supports_virtualized_cursor_plane; + + /** * @master: * * Master this node is currently associated with. Protected by struct @@ -374,11 +386,6 @@ struct drm_file { * Per-file buffer caches used by the PRIME buffer sharing code. */ struct drm_prime_file_private prime; - - /* private: */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) - unsigned long lock_count; /* DRI1 legacy lock count */ -#endif }; /** diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h index 21c3d512d25c..1eef3283a109 100644 --- a/include/drm/drm_flip_work.h +++ b/include/drm/drm_flip_work.h @@ -31,11 +31,10 @@ /** * DOC: flip utils * - * Util to queue up work to run from work-queue context after flip/vblank. + * Utility to queue up work to run from work-queue context after flip/vblank. * Typically this can be used to defer unref of framebuffer's, cursor - * bo's, etc until after vblank. The APIs are all thread-safe. - * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called - * in atomic context. + * bo's, etc until after vblank. The APIs are all thread-safe. Moreover, + * drm_flip_work_commit() can be called in atomic context. */ struct drm_flip_work; @@ -52,16 +51,6 @@ struct drm_flip_work; typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); /** - * struct drm_flip_task - flip work task - * @node: list entry element - * @data: data to pass to &drm_flip_work.func - */ -struct drm_flip_task { - struct list_head node; - void *data; -}; - -/** * struct drm_flip_work - flip work queue * @name: debug name * @func: callback fxn called for each committed item @@ -79,9 +68,6 @@ struct drm_flip_work { spinlock_t lock; }; -struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags); -void drm_flip_work_queue_task(struct drm_flip_work *work, - struct drm_flip_task *task); void drm_flip_work_queue(struct drm_flip_work *work, void *val); void drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq); diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 291deb09475b..f13b34e0b752 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -15,6 +15,57 @@ struct drm_rect; struct iosys_map; +/** + * struct drm_format_conv_state - Stores format-conversion state + * + * DRM helpers for format conversion store temporary state in + * struct drm_xfrm_buf. The buffer's resources can be reused + * among multiple conversion operations. + * + * All fields are considered private. 
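DRIVER_CURSOR_HOTSPOT (drm_drv.h above) and drm_file.supports_virtualized_cursor_plane pair up: the driver declares that its cursor plane needs hotspot-aware clients, and clients opt in per file. A rough sketch of the driver side (struct contents abridged, names illustrative):

```c
#include <drm/drm_drv.h>

static const struct drm_driver example_virt_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
			   DRIVER_CURSOR_HOTSPOT,
	/* ... name, fops, ioctls, etc. ... */
};
```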
+ */ +struct drm_format_conv_state { + struct { + void *mem; + size_t size; + bool preallocated; + } tmp; +}; + +#define __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, _preallocated) { \ + .tmp = { \ + .mem = (_mem), \ + .size = (_size), \ + .preallocated = (_preallocated), \ + } \ + } + +/** + * DRM_FORMAT_CONV_STATE_INIT - Initializer for struct drm_format_conv_state + * + * Initializes an instance of struct drm_format_conv_state to default values. + */ +#define DRM_FORMAT_CONV_STATE_INIT \ + __DRM_FORMAT_CONV_STATE_INIT(NULL, 0, false) + +/** + * DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED - Initializer for struct drm_format_conv_state + * @_mem: The preallocated memory area + * @_size: The number of bytes in _mem + * + * Initializes an instance of struct drm_format_conv_state to preallocated + * storage. The caller is responsible for releasing the provided memory range. + */ +#define DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(_mem, _size) \ + __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, true) + +void drm_format_conv_state_init(struct drm_format_conv_state *state); +void drm_format_conv_state_copy(struct drm_format_conv_state *state, + const struct drm_format_conv_state *old_state); +void *drm_format_conv_state_reserve(struct drm_format_conv_state *state, + size_t new_size, gfp_t flags); +void drm_format_conv_state_release(struct drm_format_conv_state *state); + unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format, const struct drm_rect *clip); @@ -23,45 +74,49 @@ void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch, const struct drm_rect *clip); void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool cached); + const struct drm_rect *clip, bool cached, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool swab); + const struct drm_rect *clip, struct drm_format_conv_state *state, + bool swab); void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - 
const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *rect); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); size_t drm_fb_build_fourcc_list(struct drm_device *dev, const u32 *native_fourccs, size_t native_nfourccs, diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 80ece7b6dd9b..668077009fce 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -189,18 +189,6 @@ struct drm_framebuffer { */ int flags; /** - * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor - * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR - * universal plane. - */ - int hot_x; - /** - * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor - * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR - * universal plane. - */ - int hot_y; - /** * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. */ struct list_head filp_head; diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 16364487fde9..369505447acd 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -580,7 +580,7 @@ int drm_gem_evict(struct drm_gem_object *obj); * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object * @obj: the &drm_gem_object * - * This initializes the &drm_gem_object's &drm_gpuva list. + * This initializes the &drm_gem_object's &drm_gpuvm_bo list. * * Calling this function is only necessary for drivers intending to support the * &drm_driver_feature DRIVER_GEM_GPUVA. @@ -593,28 +593,28 @@ static inline void drm_gem_gpuva_init(struct drm_gem_object *obj) } /** - * drm_gem_for_each_gpuva() - iternator to walk over a list of gpuvas - * @entry__: &drm_gpuva structure to assign to in each iteration step - * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with + * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo + * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step + * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with * - * This iterator walks over all &drm_gpuva structures associated with the - * &drm_gpuva_manager. + * This iterator walks over all &drm_gpuvm_bo structures associated with the + * &drm_gem_object. 
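The format helpers above now thread a caller-owned struct drm_format_conv_state through every conversion so temporary buffers can be reused across updates; shadow-plane state grows an embedded fmtcnv_state for the same purpose. A minimal sketch of the explicit-state flavour (function name hypothetical):

```c
#include <linux/iosys-map.h>
#include <drm/drm_format_helper.h>

static void example_blit_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
				const struct iosys_map *src,
				const struct drm_framebuffer *fb,
				const struct drm_rect *clip)
{
	struct drm_format_conv_state conv = DRM_FORMAT_CONV_STATE_INIT;

	/* Further conversions could reuse conv's temporary buffer. */
	drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, &conv, false);

	drm_format_conv_state_release(&conv);
}
```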
*/ -#define drm_gem_for_each_gpuva(entry__, obj__) \ - list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry) +#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \ + list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem) /** - * drm_gem_for_each_gpuva_safe() - iternator to safely walk over a list of - * gpuvas - * @entry__: &drm_gpuva structure to assign to in each iteration step - * @next__: &next &drm_gpuva to store the next step - * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with + * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of + * &drm_gpuvm_bo + * @entry__: &drm_gpuvm_bostructure to assign to in each iteration step + * @next__: &next &drm_gpuvm_bo to store the next step + * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with * - * This iterator walks over all &drm_gpuva structures associated with the + * This iterator walks over all &drm_gpuvm_bo structures associated with the * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence * it is save against removal of elements. */ -#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \ - list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry) +#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \ + list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem) #endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index 40b8b039518e..3e01c619a25e 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -5,6 +5,7 @@ #include <linux/iosys-map.h> +#include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> @@ -49,6 +50,15 @@ struct drm_shadow_plane_state { /** @base: plane state */ struct drm_plane_state base; + /** + * @fmtcnv_state: Format-conversion state + * + * Per-plane state for format conversion. + * Flags for copying shadow buffers into backend storage. Also holds + * temporary storage for format conversion. + */ + struct drm_format_conv_state fmtcnv_state; + /* Transitional state - do not export or duplicate */ /** diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index bdfafc4a7705..6258849382e1 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -25,13 +25,17 @@ * OTHER DEALINGS IN THE SOFTWARE. 
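With the iterators above renamed, a GEM object's VM bindings are walked through the intermediate drm_gpuvm_bo links; this sketch mirrors the drm_gpuvm_bo_gem_evict() helper added later in this diff:

```c
static void example_mark_evicted(struct drm_gem_object *obj)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_gpuva_assert_lock_held(obj);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, true);
}
```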
*/ +#include <linux/dma-resv.h> #include <linux/list.h> #include <linux/rbtree.h> #include <linux/types.h> +#include <drm/drm_device.h> #include <drm/drm_gem.h> +#include <drm/drm_exec.h> struct drm_gpuvm; +struct drm_gpuvm_bo; struct drm_gpuvm_ops; /** @@ -73,6 +77,12 @@ struct drm_gpuva { struct drm_gpuvm *vm; /** + * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped + * &drm_gem_object + */ + struct drm_gpuvm_bo *vm_bo; + + /** * @flags: the &drm_gpuva_flags for this mapping */ enum drm_gpuva_flags flags; @@ -107,7 +117,7 @@ struct drm_gpuva { struct drm_gem_object *obj; /** - * @entry: the &list_head to attach this object to a &drm_gem_object + * @entry: the &list_head to attach this object to a &drm_gpuvm_bo */ struct list_head entry; } gem; @@ -140,7 +150,7 @@ struct drm_gpuva { int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va); void drm_gpuva_remove(struct drm_gpuva *va); -void drm_gpuva_link(struct drm_gpuva *va); +void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo); void drm_gpuva_unlink(struct drm_gpuva *va); struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm, @@ -184,6 +194,22 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) } /** + * enum drm_gpuvm_flags - flags for struct drm_gpuvm + */ +enum drm_gpuvm_flags { + /** + * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the + * GPUVM's &dma_resv lock + */ + DRM_GPUVM_RESV_PROTECTED = BIT(0), + + /** + * @DRM_GPUVM_USERBITS: user defined bits + */ + DRM_GPUVM_USERBITS = BIT(1), +}; + +/** * struct drm_gpuvm - DRM GPU VA Manager * * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using @@ -202,6 +228,16 @@ struct drm_gpuvm { const char *name; /** + * @flags: the &drm_gpuvm_flags of this GPUVM + */ + enum drm_gpuvm_flags flags; + + /** + * @drm: the &drm_device this VM lives in + */ + struct drm_device *drm; + + /** * @mm_start: start of the VA space */ u64 mm_start; @@ -227,6 +263,11 @@ struct drm_gpuvm { } rb; /** + * @kref: reference count of this object + */ + struct kref kref; + + /** * @kernel_alloc_node: * * &drm_gpuva representing the address space cutout reserved for @@ -238,16 +279,147 @@ struct drm_gpuvm { * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers */ const struct drm_gpuvm_ops *ops; + + /** + * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv. 
+ */ + struct drm_gem_object *r_obj; + + /** + * @extobj: structure holding the extobj list + */ + struct { + /** + * @list: &list_head storing &drm_gpuvm_bos serving as + * external object + */ + struct list_head list; + + /** + * @local_list: pointer to the local list temporarily storing + * entries from the external object list + */ + struct list_head *local_list; + + /** + * @lock: spinlock to protect the extobj list + */ + spinlock_t lock; + } extobj; + + /** + * @evict: structure holding the evict list and evict list lock + */ + struct { + /** + * @list: &list_head storing &drm_gpuvm_bos currently being + * evicted + */ + struct list_head list; + + /** + * @local_list: pointer to the local list temporarily storing + * entries from the evicted object list + */ + struct list_head *local_list; + + /** + * @lock: spinlock to protect the evict list + */ + spinlock_t lock; + } evict; }; void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, + enum drm_gpuvm_flags flags, + struct drm_device *drm, + struct drm_gem_object *r_obj, u64 start_offset, u64 range, u64 reserve_offset, u64 reserve_range, const struct drm_gpuvm_ops *ops); -void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm); +/** + * drm_gpuvm_get() - acquire a struct drm_gpuvm reference + * @gpuvm: the &drm_gpuvm to acquire the reference of + * + * This function acquires an additional reference to @gpuvm. It is illegal to + * call this without already holding a reference. No locks required. + */ +static inline struct drm_gpuvm * +drm_gpuvm_get(struct drm_gpuvm *gpuvm) +{ + kref_get(&gpuvm->kref); + + return gpuvm; +} + +void drm_gpuvm_put(struct drm_gpuvm *gpuvm); + +bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range); bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range); +struct drm_gem_object * +drm_gpuvm_resv_object_alloc(struct drm_device *drm); + +/** + * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is + * set + * @gpuvm: the &drm_gpuvm + * + * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise. 
+ */ +static inline bool +drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm) +{ + return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED; +} + +/** + * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv + * @gpuvm__: the &drm_gpuvm + * + * Returns: a pointer to the &drm_gpuvm's shared &dma_resv + */ +#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv) + +/** + * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's + * &dma_resv + * @gpuvm__: the &drm_gpuvm + * + * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared + * &dma_resv + */ +#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj) + +#define drm_gpuvm_resv_held(gpuvm__) \ + dma_resv_held(drm_gpuvm_resv(gpuvm__)) + +#define drm_gpuvm_resv_assert_held(gpuvm__) \ + dma_resv_assert_held(drm_gpuvm_resv(gpuvm__)) + +#define drm_gpuvm_resv_held(gpuvm__) \ + dma_resv_held(drm_gpuvm_resv(gpuvm__)) + +#define drm_gpuvm_resv_assert_held(gpuvm__) \ + dma_resv_assert_held(drm_gpuvm_resv(gpuvm__)) + +/** + * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an + * external object + * @gpuvm: the &drm_gpuvm to check + * @obj: the &drm_gem_object to check + * + * Returns: true if the &drm_gem_object &dma_resv differs from the + * &drm_gpuvms &dma_resv, false otherwise + */ +static inline bool +drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + return obj && obj->resv != drm_gpuvm_resv(gpuvm); +} + static inline struct drm_gpuva * __drm_gpuva_next(struct drm_gpuva *va) { @@ -327,6 +499,285 @@ __drm_gpuva_next(struct drm_gpuva *va) list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry) /** + * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec + * + * This structure should be created on the stack as &drm_exec should be. + * + * Optionally, @extra can be set in order to lock additional &drm_gem_objects. + */ +struct drm_gpuvm_exec { + /** + * @exec: the &drm_exec structure + */ + struct drm_exec exec; + + /** + * @flags: the flags for the struct drm_exec + */ + uint32_t flags; + + /** + * @vm: the &drm_gpuvm to lock its DMA reservations + */ + struct drm_gpuvm *vm; + + /** + * @num_fences: the number of fences to reserve for the &dma_resv of the + * locked &drm_gem_objects + */ + unsigned int num_fences; + + /** + * @extra: Callback and corresponding private data for the driver to + * lock arbitrary additional &drm_gem_objects. + */ + struct { + /** + * @fn: The driver callback to lock additional &drm_gem_objects. + */ + int (*fn)(struct drm_gpuvm_exec *vm_exec); + + /** + * @priv: driver private data for the @fn callback + */ + void *priv; + } extra; +}; + +int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences); + +int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences); + +int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + u64 addr, u64 range, + unsigned int num_fences); + +int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec); + +int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec, + struct drm_gem_object **objs, + unsigned int num_objs); + +int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec, + u64 addr, u64 range); + +/** + * drm_gpuvm_exec_unlock() - lock all dma-resv of all assoiciated BOs + * @vm_exec: the &drm_gpuvm_exec wrapper + * + * Releases all dma-resv locks of all &drm_gem_objects previously acquired + * through drm_gpuvm_exec_lock() or its variants. 
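struct drm_gpuvm_exec above wraps drm_exec for a whole VM. A hedged usage sketch (error paths trimmed; DRM_EXEC_INTERRUPTIBLE_WAIT is the existing drm_exec flag):

```c
#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>

static int example_lock_vm(struct drm_gpuvm *gpuvm)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
	};
	int ret;

	ret = drm_gpuvm_exec_lock(&vm_exec);
	if (ret)
		return ret;

	/* ... validate and submit; add fences via
	 * drm_gpuvm_exec_resv_add_fence() ... */

	drm_gpuvm_exec_unlock(&vm_exec);
	return 0;
}
```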
+ * + * Returns: 0 on success, negative error code on failure. + */ +static inline void +drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec) +{ + drm_exec_fini(&vm_exec->exec); +} + +int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec); +void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + struct dma_fence *fence, + enum dma_resv_usage private_usage, + enum dma_resv_usage extobj_usage); + +/** + * drm_gpuvm_exec_resv_add_fence() + * @vm_exec: the &drm_gpuvm_exec wrapper + * @fence: fence to add + * @private_usage: private dma-resv usage + * @extobj_usage: extobj dma-resv usage + * + * See drm_gpuvm_resv_add_fence(). + */ +static inline void +drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec, + struct dma_fence *fence, + enum dma_resv_usage private_usage, + enum dma_resv_usage extobj_usage) +{ + drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence, + private_usage, extobj_usage); +} + +/** + * drm_gpuvm_exec_validate() + * @vm_exec: the &drm_gpuvm_exec wrapper + * + * See drm_gpuvm_validate(). + */ +static inline int +drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec) +{ + return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec); +} + +/** + * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and + * &drm_gem_object combination + * + * This structure is an abstraction representing a &drm_gpuvm and + * &drm_gem_object combination. It serves as an indirection to accelerate + * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same + * &drm_gem_object. + * + * Furthermore it is used cache evicted GEM objects for a certain GPU-VM to + * accelerate validation. + * + * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once + * a GEM object is mapped first in a GPU-VM and release the instance once the + * last mapping of the GEM object in this GPU-VM is unmapped. + */ +struct drm_gpuvm_bo { + /** + * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference + * counted pointer. + */ + struct drm_gpuvm *vm; + + /** + * @obj: The &drm_gem_object being mapped in @vm. This is a reference + * counted pointer. + */ + struct drm_gem_object *obj; + + /** + * @evicted: Indicates whether the &drm_gem_object is evicted; field + * protected by the &drm_gem_object's dma-resv lock. + */ + bool evicted; + + /** + * @kref: The reference count for this &drm_gpuvm_bo. + */ + struct kref kref; + + /** + * @list: Structure containing all &list_heads. + */ + struct { + /** + * @gpuva: The list of linked &drm_gpuvas. + * + * It is safe to access entries from this list as long as the + * GEM's gpuva lock is held. See also struct drm_gem_object. + */ + struct list_head gpuva; + + /** + * @entry: Structure containing all &list_heads serving as + * entry. + */ + struct { + /** + * @gem: List entry to attach to the &drm_gem_objects + * gpuva list. + */ + struct list_head gem; + + /** + * @evict: List entry to attach to the &drm_gpuvms + * extobj list. + */ + struct list_head extobj; + + /** + * @evict: List entry to attach to the &drm_gpuvms evict + * list. 
+ */ + struct list_head evict; + } entry; + } list; +}; + +struct drm_gpuvm_bo * +drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); + +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo); + +/** + * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference + * @vm_bo: the &drm_gpuvm_bo to acquire the reference of + * + * This function acquires an additional reference to @vm_bo. It is illegal to + * call this without already holding a reference. No locks required. + */ +static inline struct drm_gpuvm_bo * +drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo) +{ + kref_get(&vm_bo->kref); + return vm_bo; +} + +bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo); + +struct drm_gpuvm_bo * +drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); + +void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict); + +/** + * drm_gpuvm_bo_gem_evict() + * @obj: the &drm_gem_object + * @evict: indicates whether @obj is evicted + * + * See drm_gpuvm_bo_evict(). + */ +static inline void +drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict) +{ + struct drm_gpuvm_bo *vm_bo; + + drm_gem_gpuva_assert_lock_held(obj); + drm_gem_for_each_gpuvm_bo(vm_bo, obj) + drm_gpuvm_bo_evict(vm_bo, evict); +} + +void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo); + +/** + * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva + * @va__: &drm_gpuva structure to assign to in each iteration step + * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with + * + * This iterator walks over all &drm_gpuva structures associated with the + * &drm_gpuvm_bo. + * + * The caller must hold the GEM's gpuva lock. + */ +#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \ + list_for_each_entry(va__, &(vm_bo)->list.gpuva, gem.entry) + +/** + * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of + * &drm_gpuva + * @va__: &drm_gpuva structure to assign to in each iteration step + * @next__: &next &drm_gpuva to store the next step + * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with + * + * This iterator walks over all &drm_gpuva structures associated with the + * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence + * it is save against removal of elements. + * + * The caller must hold the GEM's gpuva lock. + */ +#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \ + list_for_each_entry_safe(va__, next__, &(vm_bo)->list.gpuva, gem.entry) + +/** * enum drm_gpuva_op_type - GPU VA operation type * * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm. @@ -595,8 +1046,7 @@ drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range); struct drm_gpuva_ops * -drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, - struct drm_gem_object *obj); +drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo); void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm, struct drm_gpuva_ops *ops); @@ -617,6 +1067,14 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, */ struct drm_gpuvm_ops { /** + * @vm_free: called when the last reference of a struct drm_gpuvm is + * dropped + * + * This callback is mandatory. 
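A hedged sketch of the drm_gpuvm_bo lifecycle documented above: obtain the VM/BO link before inserting a mapping, then drop the local reference (assuming, per the GPUVM docs, that drm_gpuva_link() keeps its own reference to the vm_bo):

```c
#include <linux/err.h>
#include <drm/drm_gpuvm.h>

static int example_link_mapping(struct drm_gpuvm *gpuvm,
				struct drm_gem_object *obj,
				struct drm_gpuva *va)
{
	struct drm_gpuvm_bo *vm_bo;

	/* Creates the vm_bo, or returns a reference to an existing one. */
	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	drm_gpuva_link(va, vm_bo);	/* note the new vm_bo argument */
	drm_gpuvm_bo_put(vm_bo);	/* the link holds its own reference */

	return 0;
}
```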
+ */ + void (*vm_free)(struct drm_gpuvm *gpuvm); + + /** * @op_alloc: called when the &drm_gpuvm allocates * a struct drm_gpuva_op * @@ -641,6 +1099,42 @@ struct drm_gpuvm_ops { void (*op_free)(struct drm_gpuva_op *op); /** + * @vm_bo_alloc: called when the &drm_gpuvm allocates + * a struct drm_gpuvm_bo + * + * Some drivers may want to embed struct drm_gpuvm_bo into driver + * specific structures. By implementing this callback drivers can + * allocate memory accordingly. + * + * This callback is optional. + */ + struct drm_gpuvm_bo *(*vm_bo_alloc)(void); + + /** + * @vm_bo_free: called when the &drm_gpuvm frees a + * struct drm_gpuvm_bo + * + * Some drivers may want to embed struct drm_gpuvm_bo into driver + * specific structures. By implementing this callback drivers can + * free the previously allocated memory accordingly. + * + * This callback is optional. + */ + void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo); + + /** + * @vm_bo_validate: called from drm_gpuvm_validate() + * + * Drivers receive this callback for every evicted &drm_gem_object being + * mapped in the corresponding &drm_gpuvm. + * + * Typically, drivers would call their driver specific variant of + * ttm_bo_validate() from within this callback. + */ + int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo, + struct drm_exec *exec); + + /** * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the * mapping once all previous steps were completed * @@ -702,4 +1196,32 @@ void drm_gpuva_remap(struct drm_gpuva *prev, void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op); +/** + * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of + * the unmap stage of a remap op. + * @op: Remap op. + * @start_addr: Output pointer for the start of the required unmap. + * @range: Output pointer for the length of the required unmap. + * + * The given start address and range will be set such that they represent the + * range of the address space that was previously covered by the mapping being + * re-mapped, but is now empty. + */ +static inline void +drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op, + u64 *start_addr, u64 *range) +{ + const u64 va_start = op->prev ? + op->prev->va.addr + op->prev->va.range : + op->unmap->va->va.addr; + const u64 va_end = op->next ? + op->next->va.addr : + op->unmap->va->va.addr + op->unmap->va->va.range; + + if (start_addr) + *start_addr = va_start; + if (range) + *range = va_end - va_start; +} + #endif /* __DRM_GPUVM_H__ */ diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index 6ed61c371f6c..171760b6c4a1 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -110,17 +110,6 @@ enum drm_ioctl_flags { */ DRM_ROOT_ONLY = BIT(2), /** - * @DRM_UNLOCKED: - * - * Whether &drm_ioctl_desc.func should be called with the DRM BKL held - * or not. Enforced as the default for all modern drivers, hence there - * should never be a need to set this flag. - * - * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the - * only legacy IOCTL which needs this. 
- */ - DRM_UNLOCKED = BIT(4), - /** * @DRM_RENDER_ALLOW: * * This is used for all ioctl needed for rendering only, for drivers diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h deleted file mode 100644 index 0fc85418aad8..000000000000 --- a/include/drm/drm_legacy.h +++ /dev/null @@ -1,331 +0,0 @@ -#ifndef __DRM_DRM_LEGACY_H__ -#define __DRM_DRM_LEGACY_H__ -/* - * Legacy driver interfaces for the Direct Rendering Manager - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * Copyright (c) 2009-2010, Code Aurora Forum. - * All rights reserved. - * Copyright © 2014 Intel Corporation - * Daniel Vetter <[email protected]> - * - * Author: Rickard E. (Rik) Faith <[email protected]> - * Author: Gareth Hughes <[email protected]> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <linux/agp_backend.h> - -#include <drm/drm.h> -#include <drm/drm_auth.h> - -struct drm_device; -struct drm_driver; -struct file; -struct pci_driver; - -/* - * Legacy Support for palateontologic DRM drivers - * - * If you add a new driver and it uses any of these functions or structures, - * you're doing it terribly wrong. - */ - -/* - * Hash-table Support - */ - -struct drm_hash_item { - struct hlist_node head; - unsigned long key; -}; - -struct drm_open_hash { - struct hlist_head *table; - u8 order; -}; - -/** - * DMA buffer. 
- */ -struct drm_buf { - int idx; /**< Index into master buflist */ - int total; /**< Buffer size */ - int order; /**< log-base-2(total) */ - int used; /**< Amount of buffer in use (for DMA) */ - unsigned long offset; /**< Byte offset (used internally) */ - void *address; /**< Address of buffer */ - unsigned long bus_address; /**< Bus address of buffer */ - struct drm_buf *next; /**< Kernel-only: used for free list */ - __volatile__ int waiting; /**< On kernel DMA queue */ - __volatile__ int pending; /**< On hardware DMA queue */ - struct drm_file *file_priv; /**< Private of holding file descr */ - int context; /**< Kernel queue for this buffer */ - int while_locked; /**< Dispatch this buffer while locked */ - enum { - DRM_LIST_NONE = 0, - DRM_LIST_FREE = 1, - DRM_LIST_WAIT = 2, - DRM_LIST_PEND = 3, - DRM_LIST_PRIO = 4, - DRM_LIST_RECLAIM = 5 - } list; /**< Which list we're on */ - - int dev_priv_size; /**< Size of buffer private storage */ - void *dev_private; /**< Per-buffer private storage */ -}; - -typedef struct drm_dma_handle { - dma_addr_t busaddr; - void *vaddr; - size_t size; -} drm_dma_handle_t; - -/** - * Buffer entry. There is one of this for each buffer size order. - */ -struct drm_buf_entry { - int buf_size; /**< size */ - int buf_count; /**< number of buffers */ - struct drm_buf *buflist; /**< buffer list */ - int seg_count; - int page_order; - struct drm_dma_handle **seglist; - - int low_mark; /**< Low water mark */ - int high_mark; /**< High water mark */ -}; - -/** - * DMA data. - */ -struct drm_device_dma { - - struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ - int buf_count; /**< total number of buffers */ - struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ - int seg_count; - int page_count; /**< number of pages */ - unsigned long *pagelist; /**< page list */ - unsigned long byte_count; - enum { - _DRM_DMA_USE_AGP = 0x01, - _DRM_DMA_USE_SG = 0x02, - _DRM_DMA_USE_FB = 0x04, - _DRM_DMA_USE_PCI_RO = 0x08 - } flags; - -}; - -/** - * Scatter-gather memory. 
- */ -struct drm_sg_mem { - unsigned long handle; - void *virtual; - int pages; - struct page **pagelist; - dma_addr_t *busaddr; -}; - -/** - * Kernel side of a mapping - */ -struct drm_local_map { - dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/ - unsigned long size; /**< Requested physical size (bytes) */ - enum drm_map_type type; /**< Type of memory to map */ - enum drm_map_flags flags; /**< Flags */ - void *handle; /**< User-space: "Handle" to pass to mmap() */ - /**< Kernel-space: kernel-virtual address */ - int mtrr; /**< MTRR slot used */ -}; - -typedef struct drm_local_map drm_local_map_t; - -/** - * Mappings list - */ -struct drm_map_list { - struct list_head head; /**< list head */ - struct drm_hash_item hash; - struct drm_local_map *map; /**< mapping */ - uint64_t user_token; - struct drm_master *master; -}; - -int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, struct drm_local_map **map_p); -struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token); -void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); -int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); -struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); -int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); - -int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); -int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); - -/** - * Test that the hardware lock is held by the caller, returning otherwise. - * - * \param dev DRM device. - * \param filp file pointer of the caller. - */ -#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ -do { \ - if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ - _file_priv->master->lock.file_priv != _file_priv) { \ - DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ - __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ - _file_priv->master->lock.file_priv, _file_priv); \ - return -EINVAL; \ - } \ -} while (0) - -void drm_legacy_idlelock_take(struct drm_lock_data *lock); -void drm_legacy_idlelock_release(struct drm_lock_data *lock); - -/* drm_irq.c */ -int drm_legacy_irq_uninstall(struct drm_device *dev); - -/* drm_pci.c */ - -#ifdef CONFIG_PCI - -int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver); -void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver); - -#else - -static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, - size_t size, size_t align) -{ - return NULL; -} - -static inline void drm_pci_free(struct drm_device *dev, - struct drm_dma_handle *dmah) -{ -} - -static inline int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - return -EINVAL; -} - -static inline void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ -} - -#endif - -/* - * AGP Support - */ - -struct drm_agp_head { - struct agp_kern_info agp_info; - struct list_head memory; - unsigned long mode; - struct agp_bridge_data *bridge; - int enabled; - int acquired; - unsigned long base; - int agp_mtrr; - int cant_use_aperture; - unsigned long page_mask; -}; - -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) -struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev); -int drm_legacy_agp_acquire(struct drm_device *dev); -int drm_legacy_agp_release(struct 
drm_device *dev); -int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info); -int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -#else -static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) -{ - return NULL; -} - -static inline int drm_legacy_agp_acquire(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_release(struct drm_device *dev) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_enable(struct drm_device *dev, - struct drm_agp_mode mode) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_info(struct drm_device *dev, - struct drm_agp_info *info) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_alloc(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_free(struct drm_device *dev, - struct drm_agp_buffer *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_unbind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} - -static inline int drm_legacy_agp_bind(struct drm_device *dev, - struct drm_agp_binding *request) -{ - return -ENODEV; -} -#endif - -/* drm_memory.c */ -void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); -void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); -void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); - -#endif /* __DRM_DRM_LEGACY_H__ */ diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h index 816f196b3d4c..e8e0f8d39f3a 100644 --- a/include/drm/drm_mipi_dbi.h +++ b/include/drm/drm_mipi_dbi.h @@ -12,6 +12,7 @@ #include <drm/drm_device.h> #include <drm/drm_simple_kms_helper.h> +struct drm_format_conv_state; struct drm_rect; struct gpio_desc; struct iosys_map; @@ -192,7 +193,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, size_t len); int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swap); + struct drm_rect *clip, bool swap, + struct drm_format_conv_state *fmtcnv_state); /** * mipi_dbi_command - MIPI DCS command with optional parameter(s) diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index c9df0407980c..c0aec0d4d664 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -168,6 +168,7 @@ struct mipi_dsi_device_info { * struct mipi_dsi_device - DSI peripheral device * @host: DSI host for this peripheral * @dev: driver model device node for this peripheral + * @attached: the DSI device has been successfully attached * @name: DSI peripheral chip type * @channel: virtual channel assigned to the peripheral * @format: pixel format for video mode @@ -184,6 +185,7 @@ struct mipi_dsi_device_info { struct mipi_dsi_device { struct mipi_dsi_host *host; struct device dev; + bool attached; char name[DSI_DEV_NAME_SIZE]; unsigned int channel; diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index e3c3ac615909..a33cf7488737 100644 --- 
a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1154,6 +1154,11 @@ struct drm_connector_helper_funcs {
 	 * This operation is optional.
 	 *
 	 * This callback is used by the drm_kms_helper_poll_enable() helpers.
+	 *
+	 * This operation does not need to perform any hpd state tracking as
+	 * the DRM core handles that maintenance and ensures the calls to enable
+	 * and disable hpd are balanced.
+	 *
 	 */
 	void (*enable_hpd)(struct drm_connector *connector);
@@ -1165,6 +1170,11 @@ struct drm_connector_helper_funcs {
 	 * This operation is optional.
 	 *
 	 * This callback is used by the drm_kms_helper_poll_disable() helpers.
+	 *
+	 * This operation does not need to perform any hpd state tracking as
+	 * the DRM core handles that maintenance and ensures the calls to enable
+	 * and disable hpd are balanced.
+	 *
 	 */
 	void (*disable_hpd)(struct drm_connector *connector);
 };
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 79d62856defb..c6565a6f9324 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -116,6 +116,10 @@ struct drm_plane_state {
 	/** @src_h: height of visible portion of plane (in 16.16) */
 	uint32_t src_h, src_w;
 
+	/** @hotspot_x: x offset to mouse cursor hotspot */
+	/** @hotspot_y: y offset to mouse cursor hotspot */
+	int32_t hotspot_x, hotspot_y;
+
 	/**
 	 * @alpha:
 	 * Opacity of the plane with 0 as completely transparent and 0xffff as
@@ -191,6 +195,16 @@ struct drm_plane_state {
 	struct drm_property_blob *fb_damage_clips;
 
 	/**
+	 * @ignore_damage_clips:
+	 *
+	 * Set by drivers to indicate to the drm_atomic_helper_damage_iter_init()
+	 * helper that the @fb_damage_clips blob property should be ignored.
+	 *
+	 * See :ref:`damage_tracking_properties` for more information.
+	 */
+	bool ignore_damage_clips;
+
+	/**
 	 * @src:
 	 *
 	 * source coordinates of the plane (in 16.16).
@@ -748,6 +762,16 @@ struct drm_plane {
 	 * scaling.
 	 */
 	struct drm_property *scaling_filter_property;
+
+	/**
+	 * @hotspot_x_property: property to set mouse hotspot x offset.
+	 */
+	struct drm_property *hotspot_x_property;
+
+	/**
+	 * @hotspot_y_property: property to set mouse hotspot y offset.
+	 */
+	struct drm_property *hotspot_y_property;
 };
 
 #define obj_to_plane(x) container_of(x, struct drm_plane, base)
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 3a574e8cd22f..75f9c4830564 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -26,7 +26,6 @@
 
 #include <linux/types.h>
 
-struct drm_atomic_state;
 struct drm_crtc;
 struct drm_framebuffer;
 struct drm_modeset_acquire_ctx;
@@ -42,7 +41,6 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr
 int drm_plane_helper_disable_primary(struct drm_plane *plane,
 				     struct drm_modeset_acquire_ctx *ctx);
 void drm_plane_helper_destroy(struct drm_plane *plane);
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state);
 
 /**
  * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index a93a387f8a1a..dd4883df876a 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -453,7 +453,7 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
 
 /* Helper for struct drm_device based logging. */
 #define __drm_printk(drm, level, type, fmt, ...)			\
-	dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
+	dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__)
 
 #define drm_info(drm, fmt, ...)					\
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d2fb81e34174..5acc64954a88 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -63,10 +63,10 @@ struct drm_file;
  * to an array, and as such should start at 0.
  */
 enum drm_sched_priority {
-	DRM_SCHED_PRIORITY_MIN,
-	DRM_SCHED_PRIORITY_NORMAL,
-	DRM_SCHED_PRIORITY_HIGH,
 	DRM_SCHED_PRIORITY_KERNEL,
+	DRM_SCHED_PRIORITY_HIGH,
+	DRM_SCHED_PRIORITY_NORMAL,
+	DRM_SCHED_PRIORITY_LOW,
 
 	DRM_SCHED_PRIORITY_COUNT
 };
@@ -320,6 +320,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  * @sched: the scheduler instance on which this job is scheduled.
  * @s_fence: contains the fences for the scheduling of job.
  * @finish_cb: the callback for the finished fence.
 * @credits: the number of credits this job contributes to the scheduler
  * @work: Helper to reschedule job kill to different context.
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
@@ -339,6 +340,8 @@ struct drm_sched_job {
 	struct drm_gpu_scheduler	*sched;
 	struct drm_sched_fence		*s_fence;
 
+	u32				credits;
+
 	/*
 	 * work is used only after finish_cb has been used and will not be
 	 * accessed anymore.
@@ -462,29 +465,42 @@ struct drm_sched_backend_ops {
 	 * and it's time to clean it up.
 	 */
 	void (*free_job)(struct drm_sched_job *sched_job);
+
+	/**
+	 * @update_job_credits: Called when the scheduler is considering this
+	 * job for execution.
+	 *
+	 * This callback returns the number of credits the job would take if
+	 * pushed to the hardware. Drivers may use this to dynamically update
+	 * the job's credit count. For instance, deduct the number of credits
+	 * for already signalled native fences.
+	 *
+	 * This callback is optional.
+	 */
+	u32 (*update_job_credits)(struct drm_sched_job *sched_job);
 };
 
 /**
  * struct drm_gpu_scheduler - scheduler instance-specific data
  *
  * @ops: backend operations provided by the driver.
- * @hw_submission_limit: the max size of the hardware queue.
+ * @credit_limit: the credit limit of this scheduler
+ * @credit_count: the current credit count of this scheduler
  * @timeout: the time after which a job is removed from the scheduler.
  * @name: name of the ring for which this scheduler is being used.
  * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
  *           as there's usually one run-queue per priority, but could be less.
  * @sched_rq: An allocated array of run-queues of size @num_rqs;
- * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
- *                  is ready to be scheduled.
  * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
  *                 waits on this wait queue until all the scheduled jobs are
  *                 finished.
- * @hw_rq_count: the number of jobs currently in the hardware queue.
  * @job_id_count: used to assign unique id to the each job.
+ * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
  * @timeout_wq: workqueue used to queue @work_tdr
+ * @work_run_job: work which calls run_job op of each scheduler.
+ * @work_free_job: work which calls free_job op of each scheduler.
  * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
  *            timeout interval is over.
- * @thread: the kthread on which the scheduler which run.
  * @pending_list: the list of jobs which are currently in the job queue.
  * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job crosses this limit then it is marked
@@ -493,24 +509,27 @@ struct drm_sched_backend_ops {
  * @_score: score used when the driver doesn't provide one
  * @ready: marks if the underlying HW is ready to work
  * @free_guilty: A hint to the time out handler to free the guilty job.
+ * @pause_submit: pause queuing of @work_run_job on @submit_wq
+ * @own_submit_wq: scheduler owns allocation of @submit_wq
  * @dev: system &struct device
  *
  * One scheduler is implemented for each hardware ring.
  */
 struct drm_gpu_scheduler {
 	const struct drm_sched_backend_ops	*ops;
-	uint32_t			hw_submission_limit;
+	u32				credit_limit;
+	atomic_t			credit_count;
 	long				timeout;
 	const char			*name;
 	u32				num_rqs;
 	struct drm_sched_rq		**sched_rq;
-	wait_queue_head_t		wake_up_worker;
 	wait_queue_head_t		job_scheduled;
-	atomic_t			hw_rq_count;
 	atomic64_t			job_id_count;
+	struct workqueue_struct		*submit_wq;
 	struct workqueue_struct		*timeout_wq;
+	struct work_struct		work_run_job;
+	struct work_struct		work_free_job;
 	struct delayed_work		work_tdr;
-	struct task_struct		*thread;
 	struct list_head		pending_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
@@ -518,19 +537,22 @@ struct drm_gpu_scheduler {
 	atomic_t			_score;
 	bool				ready;
 	bool				free_guilty;
+	bool				pause_submit;
+	bool				own_submit_wq;
 	struct device			*dev;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
-		   u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
+		   struct workqueue_struct *submit_wq,
+		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
 		   long timeout, struct workqueue_struct *timeout_wq,
 		   atomic_t *score, const char *name, struct device *dev);
 
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
-		       void *owner);
+		       u32 credits, void *owner);
 void drm_sched_job_arm(struct drm_sched_job *job);
 int drm_sched_job_add_dependency(struct drm_sched_job *job,
 				 struct dma_fence *fence);
@@ -550,8 +572,12 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 				   struct drm_gpu_scheduler **sched_list,
 				   unsigned int num_sched_list);
 
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
index 2580e05a8ab6..004b38a538ff 100644
--- a/include/linux/blk-pm.h
+++ b/include/linux/blk-pm.h
@@ -15,7 +15,6 @@ extern int blk_pre_runtime_suspend(struct request_queue *q);
 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
 extern void blk_pre_runtime_resume(struct request_queue *q);
 extern void blk_post_runtime_resume(struct request_queue *q);
-extern void blk_set_runtime_active(struct request_queue *q);
 #else
 static inline void blk_pm_runtime_init(struct request_queue *q,
 				       struct device *dev) {}
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b4825d3cdb29..6762dac3ef76 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -56,7 +56,7 @@ extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
 extern struct kobject *btf_kobj;
 extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
-extern bool bpf_global_ma_set, bpf_global_percpu_ma_set;
+extern bool bpf_global_ma_set;
 
 typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
@@ -909,10 +909,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
 	aux->ctx_field_size = size;
 }
 
+static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
+{
+	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
 static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
 {
-	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
-	       insn->src_reg == BPF_PSEUDO_FUNC;
+	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
 }
 
 struct bpf_prog_ops {
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 24213a99cc79..aa4d19d0bc94 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -301,6 +301,17 @@ struct bpf_func_state {
 	struct tnum callback_ret_range;
 	bool in_async_callback_fn;
 	bool in_exception_callback_fn;
+	/* For callback-calling functions that limit the number of possible
+	 * callback executions (e.g. bpf_loop), this keeps track of the
+	 * current simulated iteration number.
+	 * The value in frame N refers to the number of times the callback
+	 * with frame N+1 was simulated, e.g. for the following call:
+	 *
+	 * bpf_loop(..., fn, ...); | suppose current frame is N
+	 *                         | fn would be simulated in frame N+1
+	 *                         | number of simulations is tracked in frame N
+	 */
+	u32 callback_depth;
 
 	/* The following fields should be last. See copy_func_state() */
 	int acquired_refs;
@@ -400,6 +411,7 @@ struct bpf_verifier_state {
 	struct bpf_idx_pair *jmp_history;
 	u32 jmp_history_cnt;
 	u32 dfs_depth;
+	u32 callback_unroll_depth;
 };
 
 #define bpf_get_spilled_reg(slot, frame, mask) \
@@ -511,6 +523,10 @@ struct bpf_insn_aux_data {
 	 * this instruction, regardless of any heuristics
 	 */
 	bool force_checkpoint;
+	/* true if instruction is a call to a helper function that
+	 * accepts callback function as a parameter.
+	 */
+	bool calls_callback;
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index d305db70674b..efc0c0b07efb 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -195,6 +195,7 @@ enum cpuhp_state {
 	CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
 	CPUHP_AP_ARM64_ISNDEP_STARTING,
 	CPUHP_AP_SMPCFD_DYING,
+	CPUHP_AP_HRTIMERS_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
 	CPUHP_AP_ONLINE,
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3f31baa3293f..8ff4add71f88 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -343,16 +343,19 @@ struct dma_buf {
 	/**
 	 * @exp_name:
 	 *
-	 * Name of the exporter; useful for debugging. See the
-	 * DMA_BUF_SET_NAME IOCTL.
+	 * Name of the exporter; useful for debugging. Must not be NULL.
 	 */
 	const char *exp_name;
 
 	/**
 	 * @name:
 	 *
-	 * Userspace-provided name; useful for accounting and debugging,
-	 * protected by dma_resv_lock() on @resv and @name_lock for read access.
+	 * Userspace-provided name. Default value is NULL. If not NULL,
+	 * length cannot be longer than DMA_BUF_NAME_LEN, including the NUL
+	 * char. Useful for accounting and debugging.
Read/Write accesses + * are protected by @name_lock + * + * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B */ const char *name; diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h index 45fca09b2319..69501e0ec239 100644 --- a/include/linux/export-internal.h +++ b/include/linux/export-internal.h @@ -50,9 +50,7 @@ " .previous" "\n" \ ) -#ifdef CONFIG_IA64 -#define KSYM_FUNC(name) @fptr(name) -#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT) +#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT) #define KSYM_FUNC(name) P%name #else #define KSYM_FUNC(name) name diff --git a/include/linux/fb.h b/include/linux/fb.h index 94e2c44c6569..05dc9624897d 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -536,6 +536,7 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos); +int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma); #define __FB_DEFAULT_IOMEM_OPS_RDWR \ .fb_read = fb_io_read, \ @@ -547,7 +548,7 @@ extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf, .fb_imageblit = cfb_imageblit #define __FB_DEFAULT_IOMEM_OPS_MMAP \ - .fb_mmap = NULL /* default implementation */ + .fb_mmap = fb_io_mmap #define FB_DEFAULT_IOMEM_OPS \ __FB_DEFAULT_IOMEM_OPS_RDWR, \ @@ -848,7 +849,10 @@ static inline bool fb_modesetting_disabled(const char *drvname) } #endif -/* Convenience logging macros */ +/* + * Convenience logging macros + */ + #define fb_err(fb_info, fmt, ...) \ pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) #define fb_notice(info, fmt, ...) \ @@ -860,4 +864,12 @@ static inline bool fb_modesetting_disabled(const char *drvname) #define fb_dbg(fb_info, fmt, ...) \ pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_warn_once(fb_info, fmt, ...) \ + pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) + +#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \ + WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_WARN_ON_ONCE(fb_info, x) \ + fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")") + #endif /* _LINUX_FB_H */ diff --git a/include/linux/hid.h b/include/linux/hid.h index 5a8387a4a712..bf43f3ff6664 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -679,6 +679,7 @@ struct hid_device { /* device report descriptor */ struct list_head debug_list; spinlock_t debug_list_lock; wait_queue_head_t debug_wait; + struct kref ref; unsigned int id; /* system unique id */ @@ -687,6 +688,8 @@ struct hid_device { /* device report descriptor */ #endif /* CONFIG_BPF */ }; +void hiddev_free(struct kref *ref); + #define to_hid_device(pdev) \ container_of(pdev, struct hid_device, dev) diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 0ee140176f10..f2044d5a652b 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -531,9 +531,9 @@ extern void sysrq_timer_list_show(void); int hrtimers_prepare_cpu(unsigned int cpu); #ifdef CONFIG_HOTPLUG_CPU -int hrtimers_dead_cpu(unsigned int cpu); +int hrtimers_cpu_dying(unsigned int cpu); #else -#define hrtimers_dead_cpu NULL +#define hrtimers_cpu_dying NULL #endif #endif diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index 1b06d074ade0..e3649a6563dd 100644 --- a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -168,9 +168,9 @@ struct iosys_map { * about the use of uninitialized variable. 
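 *
 * The underscore suffix on the macro-local variables below (copy_, and
 * later val_ and s_) avoids shadowing: with the old names, a caller
 * expression such as IOSYS_MAP_INIT_OFFSET(&copy, 16) expanded so that
 * the macro's own, still-uninitialized "copy" was read instead of the
 * caller's variable.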
 */
 #define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({	\
-	struct iosys_map copy = *map_;		\
-	iosys_map_incr(&copy, offset_);		\
-	copy;					\
+	struct iosys_map copy_ = *map_;		\
+	iosys_map_incr(&copy_, offset_);	\
+	copy_;					\
 })
 
 /**
@@ -391,14 +391,14 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
  * Returns:
  * The value read from the mapping.
  */
-#define iosys_map_rd(map__, offset__, type__) ({				\
-	type__ val;								\
-	if ((map__)->is_iomem) {						\
-		__iosys_map_rd_io(val, (map__)->vaddr_iomem + (offset__), type__);\
-	} else {								\
-		__iosys_map_rd_sys(val, (map__)->vaddr + (offset__), type__);	\
-	}									\
-	val;									\
+#define iosys_map_rd(map__, offset__, type__) ({				\
+	type__ val_;								\
+	if ((map__)->is_iomem) {						\
+		__iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+	} else {								\
+		__iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__);	\
+	}									\
+	val_;									\
 })
 
 /**
@@ -413,13 +413,13 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
  * or if pointer may be unaligned (and problematic for the architecture
  * supported), use iosys_map_memcpy_to()
  */
-#define iosys_map_wr(map__, offset__, type__, val__) ({				\
-	type__ val = (val__);							\
-	if ((map__)->is_iomem) {						\
-		__iosys_map_wr_io(val, (map__)->vaddr_iomem + (offset__), type__);\
-	} else {								\
-		__iosys_map_wr_sys(val, (map__)->vaddr + (offset__), type__);	\
-	}									\
+#define iosys_map_wr(map__, offset__, type__, val__) ({				\
+	type__ val_ = (val__);							\
+	if ((map__)->is_iomem) {						\
+		__iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+	} else {								\
+		__iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__);	\
+	}									\
 })
 
 /**
@@ -485,9 +485,9 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
  * The value read from the mapping.
  */
 #define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({	\
-	struct_type__ *s;							\
+	struct_type__ *s_;							\
 	iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__),	\
-		     typeof(s->field__));					\
+		     typeof(s_->field__));					\
 })
 
 /**
@@ -508,9 +508,9 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
  * usage and memory layout.
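 *
 * A hypothetical usage, assuming a structure mapped at offset 0:
 *
 *	struct my_desc {
 *		u32 head;
 *		u32 tail;
 *	};
 *
 *	iosys_map_wr_field(&map, 0, struct my_desc, tail, 42);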
*/ #define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \ - struct_type__ *s; \ + struct_type__ *s_; \ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \ - typeof(s->field__), val__); \ + typeof(s_->field__), val__); \ }) #endif /* __IOSYS_MAP_H__ */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 8fa23bdcedbf..007fd9c3e4b6 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -420,7 +420,7 @@ static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising) * A function that translates value of following registers to the linkmode: * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20) * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60) - * IEEE 802.3-2018 45.2.7.14 "EEE "link partner ability 1 register (7.61) + * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61) */ static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val) { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a16c9cc063fe..2564e209465e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1797,6 +1797,13 @@ enum netdev_ml_priv_type { ML_PRIV_CAN, }; +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE, + NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */ + NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */ + NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */ +}; + /** * struct net_device - The DEVICE structure. * @@ -1991,10 +1998,14 @@ enum netdev_ml_priv_type { * * @ml_priv: Mid-layer private * @ml_priv_type: Mid-layer private type - * @lstats: Loopback statistics - * @tstats: Tunnel statistics - * @dstats: Dummy statistics - * @vstats: Virtual ethernet statistics + * + * @pcpu_stat_type: Type of device statistics which the core should + * allocate/free: none, lstats, tstats, dstats. none + * means the driver is handling statistics allocation/ + * freeing internally. 
+ * @lstats: Loopback statistics: packets, bytes + * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes + * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes * * @garp_port: GARP * @mrp_port: MRP @@ -2354,6 +2365,7 @@ struct net_device { void *ml_priv; enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type:8; union { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; @@ -2755,6 +2767,16 @@ struct pcpu_sw_netstats { struct u64_stats_sync syncp; } __aligned(4 * sizeof(u64)); +struct pcpu_dstats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_drops; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; + struct u64_stats_sync syncp; +} __aligned(8 * sizeof(u64)); + struct pcpu_lstats { u64_stats_t packets; u64_stats_t bytes; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index bcc1ea44b4e8..06142ff7f9ce 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -204,6 +204,8 @@ enum mapping_flags { AS_NO_WRITEBACK_TAGS = 5, AS_LARGE_FOLIO_SUPPORT = 6, AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */ + AS_STABLE_WRITES, /* must wait for writeback before modifying + folio contents */ }; /** @@ -289,6 +291,21 @@ static inline void mapping_clear_release_always(struct address_space *mapping) clear_bit(AS_RELEASE_ALWAYS, &mapping->flags); } +static inline bool mapping_stable_writes(const struct address_space *mapping) +{ + return test_bit(AS_STABLE_WRITES, &mapping->flags); +} + +static inline void mapping_set_stable_writes(struct address_space *mapping) +{ + set_bit(AS_STABLE_WRITES, &mapping->flags); +} + +static inline void mapping_clear_stable_writes(struct address_space *mapping) +{ + clear_bit(AS_STABLE_WRITES, &mapping->flags); +} + static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return mapping->gfp_mask; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index afb028c54f33..5547ba68e6e4 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -843,11 +843,11 @@ struct perf_event { }; /* - * ,-----------------------[1:n]----------------------. - * V V - * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event - * ^ ^ | | - * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-' + * ,-----------------------[1:n]------------------------. + * V V + * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event + * | | + * `--[n:1]-> pmu <-[1:n]--' * * * struct perf_event_pmu_context lifetime is refcount based and RCU freed @@ -865,6 +865,9 @@ struct perf_event { * ctx->mutex pinning the configuration. Since we hold a reference on * group_leader (through the filedesc) it can't go away, therefore it's * associated pmu_ctx must exist and cannot change due to ctx->mutex. 
+ * + * perf_event holds a refcount on perf_event_context + * perf_event holds a refcount on perf_event_pmu_context */ struct perf_event_pmu_context { struct pmu *pmu; diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 84aa448d8bb3..c3a00b967d18 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -47,8 +47,17 @@ #define SZ_8G _AC(0x200000000, ULL) #define SZ_16G _AC(0x400000000, ULL) #define SZ_32G _AC(0x800000000, ULL) +#define SZ_64G _AC(0x1000000000, ULL) +#define SZ_128G _AC(0x2000000000, ULL) +#define SZ_256G _AC(0x4000000000, ULL) +#define SZ_512G _AC(0x8000000000, ULL) #define SZ_1T _AC(0x10000000000, ULL) +#define SZ_2T _AC(0x20000000000, ULL) +#define SZ_4T _AC(0x40000000000, ULL) +#define SZ_8T _AC(0x80000000000, ULL) +#define SZ_16T _AC(0x100000000000, ULL) +#define SZ_32T _AC(0x200000000000, ULL) #define SZ_64T _AC(0x400000000000, ULL) #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h index c36e7a3b45e7..3be2cb564710 100644 --- a/include/linux/stackleak.h +++ b/include/linux/stackleak.h @@ -14,6 +14,7 @@ #ifdef CONFIG_GCC_PLUGIN_STACKLEAK #include <asm/stacktrace.h> +#include <linux/linkage.h> /* * The lowest address on tsk's stack which we can plausibly erase. @@ -76,6 +77,11 @@ static inline void stackleak_task_init(struct task_struct *t) # endif } +asmlinkage void noinstr stackleak_erase(void); +asmlinkage void noinstr stackleak_erase_on_task_stack(void); +asmlinkage void noinstr stackleak_erase_off_task_stack(void); +void __no_caller_saved_registers noinstr stackleak_track_stack(void); + #else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ static inline void stackleak_task_init(struct task_struct *t) { } #endif diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index b513749582d7..e4de6bc1f69b 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -144,10 +144,6 @@ struct usb_phy { */ int (*set_wakeup)(struct usb_phy *x, bool enabled); - /* notify phy port status change */ - int (*notify_port_status)(struct usb_phy *x, int port, - u16 portstatus, u16 portchange); - /* notify phy connect status change */ int (*notify_connect)(struct usb_phy *x, enum usb_device_speed speed); @@ -321,15 +317,6 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled) } static inline int -usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange) -{ - if (x && x->notify_port_status) - return x->notify_port_status(x, port, portstatus, portchange); - else - return 0; -} - -static inline int usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed) { if (x && x->notify_connect) diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index d0f2797420f7..a09e13a577a9 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -5,13 +5,6 @@ #include <linux/pci.h> #include <linux/virtio_pci.h> -struct virtio_pci_modern_common_cfg { - struct virtio_pci_common_cfg cfg; - - __le16 queue_notify_data; /* read-write */ - __le16 queue_reset; /* read-write */ -}; - /** * struct virtio_pci_modern_device - info for modern PCI virtio * @pci_dev: Ptr to the PCI device struct diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 3bbd13ab1ecf..b157c5cafd14 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -178,9 +178,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg) return *(__force __be32 *)sreg; } -static inline void nft_reg_store64(u32 *dreg, 
u64 val)
+static inline void nft_reg_store64(u64 *dreg, u64 val)
 {
-	put_unaligned(val, (u64 *)dreg);
+	put_unaligned(val, dreg);
 }
 
 static inline u64 nft_reg_load64(const u32 *sreg)
diff --git a/include/net/netkit.h b/include/net/netkit.h
index 0ba2e6b847ca..9ec0163739f4 100644
--- a/include/net/netkit.h
+++ b/include/net/netkit.h
@@ -10,6 +10,7 @@ int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev));
 #else
 static inline int netkit_prog_attach(const union bpf_attr *attr,
 				     struct bpf_prog *prog)
@@ -34,5 +35,10 @@ static inline int netkit_prog_query(const union bpf_attr *attr,
 {
 	return -EINVAL;
 }
+
+static inline struct net_device *netkit_peer_dev(struct net_device *dev)
+{
+	return NULL;
+}
 #endif /* CONFIG_NETKIT */
 #endif /* __NET_NETKIT_H */
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index 8a6dbfb23336..77f87c622a2e 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -58,6 +58,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
 	return to_ct_params(a)->nf_ft;
 }
 
+static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
+{
+	return to_ct_params(a)->helper;
+}
+
 #else
 static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
 static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
@@ -65,6 +70,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
 {
 	return NULL;
 }
+static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
+{
+	return NULL;
+}
 #endif /* CONFIG_NF_CONNTRACK */
 
 #if IS_ENABLED(CONFIG_NET_ACT_CT)
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 4c53a5ef6257..f7e537f64db4 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -328,7 +328,7 @@
 	E_(rxrpc_rtt_tx_ping,			"PING")
 
 #define rxrpc_rtt_rx_traces \
-	EM(rxrpc_rtt_rx_cancel,			"CNCL") \
+	EM(rxrpc_rtt_rx_other_ack,		"OACK") \
 	EM(rxrpc_rtt_rx_obsolete,		"OBSL") \
 	EM(rxrpc_rtt_rx_lost,			"LOST") \
 	EM(rxrpc_rtt_rx_ping_response,		"PONG") \
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index de723566c5ae..16122819edfe 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -713,7 +713,8 @@ struct drm_gem_open {
 /**
  * DRM_CAP_ASYNC_PAGE_FLIP
  *
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
  */
 #define DRM_CAP_ASYNC_PAGE_FLIP		0x7
 /**
@@ -773,6 +774,13 @@ struct drm_gem_open {
  * :ref:`drm_sync_objects`.
  */
 #define DRM_CAP_SYNCOBJ_TIMELINE	0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP	0x15
 
 /* DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
@@ -842,6 +850,31 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5
 
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal), e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set, the DRM core will hide the cursor plane on
+ * those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restrictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT	6
+
 /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
 	__u64 capability;
@@ -893,6 +926,7 @@ struct drm_syncobj_transfer {
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
 struct drm_syncobj_wait {
 	__u64 handles;
 	/* absolute timeout */
@@ -901,6 +935,14 @@ struct drm_syncobj_wait {
 	__u32 flags;
 	__u32 first_signaled; /* only valid when not waiting all */
 	__u32 pad;
+	/**
+	 * @deadline_nsec - fence deadline hint
+	 *
+	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+	 * set.
+	 */
+	__u64 deadline_nsec;
 };
 
 struct drm_syncobj_timeline_wait {
@@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait {
 	__u32 flags;
 	__u32 first_signaled; /* only valid when not waiting all */
 	__u32 pad;
+	/**
+	 * @deadline_nsec - fence deadline hint
+	 *
+	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+	 * set.
+	 */
+	__u64 deadline_nsec;
 };
 
 /**
@@ -1218,6 +1268,26 @@ extern "C" {
 
 #define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
 
+/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB		DRM_IOWR(0xD0, struct drm_mode_closefb)
+
 /*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
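A usage sketch for the new deadline hint (a hypothetical userspace helper,
error handling elided; drmIoctl() comes from libdrm's xf86drm.h):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <drm/drm.h>

	static int wait_syncobj_with_deadline(int fd, uint32_t handle,
					      int64_t abs_timeout_ns,
					      uint64_t deadline_ns)
	{
		struct drm_syncobj_wait wait;

		memset(&wait, 0, sizeof(wait));
		wait.handles = (uint64_t)(uintptr_t)&handle;
		wait.count_handles = 1;
		wait.timeout_nsec = abs_timeout_ns;	/* absolute CLOCK_MONOTONIC */
		wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
		wait.deadline_nsec = deadline_ns;	/* hint for the backing fence(s) */

		return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
	}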
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 128d09138ceb..95630f170110 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -957,6 +957,15 @@ struct hdr_output_metadata { * Request that the page-flip is performed as soon as possible, ie. with no * delay due to waiting for vblank. This may cause tearing to be visible on * the screen. + * + * When used with atomic uAPI, the driver will return an error if the hardware + * doesn't support performing an asynchronous page-flip for this update. + * User-space should handle this, e.g. by falling back to a regular page-flip. + * + * Note, some hardware might need to perform one last synchronous page-flip + * before being able to switch to asynchronous page-flips. As an exception, + * the driver will return success even though that first page-flip is not + * asynchronous. */ #define DRM_MODE_PAGE_FLIP_ASYNC 0x02 #define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4 @@ -1323,6 +1332,16 @@ struct drm_mode_rect { __s32 y2; }; +/** + * struct drm_mode_closefb + * @fb_id: Framebuffer ID. + * @pad: Must be zero. + */ +struct drm_mode_closefb { + __u32 fb_id; + __u32 pad; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index 262db0c3beee..de1944e42c65 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -196,7 +196,7 @@ struct drm_ivpu_bo_create { * * %DRM_IVPU_BO_UNCACHED: * - * Allocated BO will not be cached on host side nor snooped on the VPU side. + * Not supported. Use DRM_IVPU_BO_WC instead. * * %DRM_IVPU_BO_WC: * diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h new file mode 100644 index 000000000000..ccf6c2112468 --- /dev/null +++ b/include/uapi/drm/pvr_drm.h @@ -0,0 +1,1295 @@ +/* SPDX-License-Identifier: (GPL-2.0-only WITH Linux-syscall-note) OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_DRM_UAPI_H +#define PVR_DRM_UAPI_H + +#include "drm.h" + +#include <linux/const.h> +#include <linux/types.h> + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * DOC: PowerVR UAPI + * + * The PowerVR IOCTL argument structs have a few limitations in place, in + * addition to the standard kernel restrictions: + * + * - All members must be type-aligned. + * - The overall struct must be padded to 64-bit alignment. + * - Explicit padding is almost always required. This takes the form of + * ``_padding_[x]`` members of sufficient size to pad to the next power-of-two + * alignment, where [x] is the offset into the struct in hexadecimal. Arrays + * are never used for alignment. Padding fields must be zeroed; this is + * always checked. + * - Unions may only appear as the last member of a struct. + * - Individual union members may grow in the future. The space between the + * end of a union member and the end of its containing union is considered + * "implicit padding" and must be zeroed. This is always checked. + * + * In addition to the IOCTL argument structs, the PowerVR UAPI makes use of + * DEV_QUERY argument structs. These are used to fetch information about the + * device and runtime. These structs are subject to the same rules set out + * above. 
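 *
 * As a hypothetical illustration of the rules above: a struct with a
 * __u32 member followed by a __u64 member needs an explicit padding
 * field at offset 0x4, and userspace must zero that field::
 *
 *	struct drm_pvr_example_args {
 *		__u32 foo;
 *		__u32 _padding_4;
 *		__u64 bar;
 *	};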
+ */
+
+/**
+ * struct drm_pvr_obj_array - Container used to pass arrays of objects
+ *
+ * It is not unusual to have to extend objects to pass new parameters, and the DRM
+ * ioctl infrastructure supports that by padding ioctl arguments with zeros
+ * when the data passed by userspace is smaller than the struct defined in the
+ * drm_ioctl_desc, thus keeping things backward compatible. This type just
+ * applies the same concept to indirect objects passed through arrays referenced
+ * from the main ioctl arguments structure: the stride basically defines the size
+ * of the object passed by userspace, which allows the kernel driver to pad with
+ * zeros when it's smaller than the size of the object it expects.
+ *
+ * Use ``DRM_PVR_OBJ_ARRAY()`` to fill object array fields, unless you
+ * have a very good reason not to.
+ */
+struct drm_pvr_obj_array {
+	/** @stride: Stride of object struct. Used for versioning. */
+	__u32 stride;
+
+	/** @count: Number of objects in the array. */
+	__u32 count;
+
+	/** @array: User pointer to an array of objects. */
+	__u64 array;
+};
+
+/**
+ * DRM_PVR_OBJ_ARRAY() - Helper macro for filling &struct drm_pvr_obj_array.
+ * @cnt: Number of elements pointed to by @ptr.
+ * @ptr: Pointer to start of a C array.
+ *
+ * Return: Literal of type &struct drm_pvr_obj_array.
+ */
+#define DRM_PVR_OBJ_ARRAY(cnt, ptr) \
+	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
+
+/**
+ * DOC: PowerVR IOCTL interface
+ */
+
+/**
+ * PVR_IOCTL() - Build a PowerVR IOCTL number
+ * @_ioctl: An incrementing id for this IOCTL. Added to %DRM_COMMAND_BASE.
+ * @_mode: Must be one of %DRM_IOR, %DRM_IOW or %DRM_IOWR.
+ * @_data: The type of the args struct passed by this IOCTL.
+ *
+ * The struct referred to by @_data must have a ``drm_pvr_ioctl_`` prefix and an
+ * ``_args`` suffix. They are therefore omitted from @_data.
+ *
+ * This should only be used to build the constants described below; it should
+ * never be used to call an IOCTL directly.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define PVR_IOCTL(_ioctl, _mode, _data) \
+	_mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_##_data##_args)
+
+#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query)
+#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo)
+#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset)
+#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context)
+#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context)
+#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map)
+#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap)
+#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context)
+#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context)
+#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list)
+#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list)
+#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset)
+#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset)
+#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs)
+
+/**
+ * DOC: PowerVR IOCTL DEV_QUERY interface
+ */
+
+/**
+ * struct drm_pvr_dev_query_gpu_info - Container used to fetch information about
+ * the graphics processor.
+ * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_GPU_INFO_GET. + */ +struct drm_pvr_dev_query_gpu_info { + /** + * @gpu_id: GPU identifier. + * + * For all currently supported GPUs this is the BVNC encoded as a 64-bit + * value as follows: + * + * +--------+--------+--------+-------+ + * | 63..48 | 47..32 | 31..16 | 15..0 | + * +========+========+========+=======+ + * | B | V | N | C | + * +--------+--------+--------+-------+ + */ + __u64 gpu_id; + + /** + * @num_phantoms: Number of Phantoms present. + */ + __u32 num_phantoms; + + /** @_padding_c: Reserved. This field must be zeroed. */ + __u32 _padding_c; +}; + +/** + * struct drm_pvr_dev_query_runtime_info - Container used to fetch information + * about the graphics runtime. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET. + */ +struct drm_pvr_dev_query_runtime_info { + /** + * @free_list_min_pages: Minimum allowed free list size, + * in PM physical pages. + */ + __u64 free_list_min_pages; + + /** + * @free_list_max_pages: Maximum allowed free list size, + * in PM physical pages. + */ + __u64 free_list_max_pages; + + /** + * @common_store_alloc_region_size: Size of the Allocation + * Region within the Common Store used for coefficient and shared + * registers, in dwords. + */ + __u32 common_store_alloc_region_size; + + /** + * @common_store_partition_space_size: Size of the + * Partition Space within the Common Store for output buffers, in + * dwords. + */ + __u32 common_store_partition_space_size; + + /** + * @max_coeffs: Maximum coefficients, in dwords. + */ + __u32 max_coeffs; + + /** + * @cdm_max_local_mem_size_regs: Maximum amount of local + * memory available to a compute kernel, in dwords. + */ + __u32 cdm_max_local_mem_size_regs; +}; + +/** + * struct drm_pvr_dev_query_quirks - Container used to fetch information about + * hardware fixes for which the device may require support in the user mode + * driver. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_QUIRKS_GET. + */ +struct drm_pvr_dev_query_quirks { + /** + * @quirks: A userspace address for the hardware quirks __u32 array. + * + * The first @musthave_count items in the list are quirks that the + * client must support for this device. If userspace does not support + * all these quirks then functionality is not guaranteed and client + * initialisation must fail. + * The remaining quirks in the list affect userspace and the kernel or + * firmware. They are disabled by default and require userspace to + * opt-in. The opt-in mechanism depends on the quirk. + */ + __u64 quirks; + + /** @count: Length of @quirks (number of __u32). */ + __u16 count; + + /** + * @musthave_count: The number of entries in @quirks that are + * mandatory, starting at index 0. + */ + __u16 musthave_count; + + /** @_padding_c: Reserved. This field must be zeroed. */ + __u32 _padding_c; +}; + +/** + * struct drm_pvr_dev_query_enhancements - Container used to fetch information + * about optional enhancements supported by the device that require support in + * the user mode driver. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_ENHANCEMENTS_GET. + */ +struct drm_pvr_dev_query_enhancements { + /** + * @enhancements: A userspace address for the hardware enhancements + * __u32 array. 
+ * + * These enhancements affect userspace and the kernel or firmware. They + * are disabled by default and require userspace to opt-in. The opt-in + * mechanism depends on the enhancement. + */ + __u64 enhancements; + + /** @count: Length of @enhancements (number of __u32). */ + __u16 count; + + /** @_padding_a: Reserved. This field must be zeroed. */ + __u16 _padding_a; + + /** @_padding_c: Reserved. This field must be zeroed. */ + __u32 _padding_c; +}; + +/** + * enum drm_pvr_heap_id - Array index for heap info data returned by + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + * + * For compatibility reasons all indices will be present in the returned array; + * however, some heaps may not be present. These are indicated where + * &struct drm_pvr_heap.size is set to zero. + */ +enum drm_pvr_heap_id { + /** @DRM_PVR_HEAP_GENERAL: General purpose heap. */ + DRM_PVR_HEAP_GENERAL = 0, + /** @DRM_PVR_HEAP_PDS_CODE_DATA: PDS code and data heap. */ + DRM_PVR_HEAP_PDS_CODE_DATA, + /** @DRM_PVR_HEAP_USC_CODE: USC code heap. */ + DRM_PVR_HEAP_USC_CODE, + /** @DRM_PVR_HEAP_RGNHDR: Region header heap. Only used if GPU has BRN63142. */ + DRM_PVR_HEAP_RGNHDR, + /** @DRM_PVR_HEAP_VIS_TEST: Visibility test heap. */ + DRM_PVR_HEAP_VIS_TEST, + /** @DRM_PVR_HEAP_TRANSFER_FRAG: Transfer fragment heap. */ + DRM_PVR_HEAP_TRANSFER_FRAG, + + /** + * @DRM_PVR_HEAP_COUNT: The number of heaps returned by + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + * + * More heaps may be added, so this also serves as the copy limit when + * sent by the caller. + */ + DRM_PVR_HEAP_COUNT + /* Please only add additional heaps above DRM_PVR_HEAP_COUNT! */ +}; + +/** + * struct drm_pvr_heap - Container holding information about a single heap. + * + * This will always be fetched as an array. + */ +struct drm_pvr_heap { + /** @base: Base address of heap. */ + __u64 base; + + /** @size: Size of heap, in bytes. Will be 0 if the heap is not present. */ + __u64 size; + + /** @flags: Flags for this heap. Currently always 0. */ + __u32 flags; + + /** @page_size_log2: Log2 of page size. */ + __u32 page_size_log2; +}; + +/** + * struct drm_pvr_dev_query_heap_info - Container used to fetch information + * about heaps supported by the device driver. + * + * Please note all driver-supported heaps will be returned up to &heaps.count. + * Some heaps will not be present in all devices, which will be indicated by + * &struct drm_pvr_heap.size being set to zero. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + */ +struct drm_pvr_dev_query_heap_info { + /** + * @heaps: Array of &struct drm_pvr_heap. If the pointer is NULL, the count + * and stride will be updated with those known to the driver version, to + * facilitate allocation by the caller. + */ + struct drm_pvr_obj_array heaps; +}; + +/** + * enum drm_pvr_static_data_area_usage - Array index for static data area info + * returned by %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET. + * + * For compatibility reasons all indices will be present in the returned array; + * however, some areas may not be present. These are indicated where + * &struct drm_pvr_static_data_area.size is set to zero. + */ +enum drm_pvr_static_data_area_usage { + /** + * @DRM_PVR_STATIC_DATA_AREA_EOT: End of Tile PDS program code segment. + * + * The End of Tile PDS task runs at completion of a tile during a fragment job, and is + * responsible for emitting the tile to the Pixel Back End.
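The NULL-pointer probe described for @heaps suggests a two-pass fetch. A hedged sketch (function name and error handling are illustrative only):

```c
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/pvr_drm.h>	/* assumed install path */

/* Two-pass heap enumeration: probe count and stride with a NULL array
 * pointer, allocate, then fetch. fd is an open PowerVR render node.
 */
static struct drm_pvr_heap *fetch_heaps(int fd, __u32 *count)
{
	struct drm_pvr_dev_query_heap_info info = { .heaps = { 0 } };
	struct drm_pvr_ioctl_dev_query_args args = {
		.type = DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
		.size = sizeof(info),
		.pointer = (__u64)(uintptr_t)&info,
	};
	struct drm_pvr_heap *heaps;

	if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args))		/* probe */
		return NULL;
	heaps = calloc(info.heaps.count, info.heaps.stride);
	info.heaps.array = (__u64)(uintptr_t)heaps;
	if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args)) {	/* fetch */
		free(heaps);
		return NULL;
	}

	*count = info.heaps.count;	/* entries with size == 0 are absent */
	return heaps;
}
```

Note that a robust caller should index the result by the returned `info.heaps.stride` rather than `sizeof(struct drm_pvr_heap)` in case the two ever differ.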
+ */ + DRM_PVR_STATIC_DATA_AREA_EOT = 0, + + /** + * @DRM_PVR_STATIC_DATA_AREA_FENCE: MCU fence area, used during cache flush and + * invalidation. + * + * This must point to valid physical memory but the contents otherwise are not used. + */ + DRM_PVR_STATIC_DATA_AREA_FENCE, + + /** + * @DRM_PVR_STATIC_DATA_AREA_VDM_SYNC: VDM sync program. + * + * The VDM sync program is used to synchronise multiple areas of the GPU hardware. + */ + DRM_PVR_STATIC_DATA_AREA_VDM_SYNC, + + /** + * @DRM_PVR_STATIC_DATA_AREA_YUV_CSC: YUV coefficients. + * + * The area contains up to 16 slots with a stride of 64 bytes. Each is a 3x4 matrix of u16 fixed + * point numbers, with 1 sign bit, 2 integer bits and 13 fractional bits. + * + * The slots are: + * 0 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR + * 1 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (full range) + * 2 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (conformant range) + * 3 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (full range) + * 4 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range) + * 5 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (full range) + * 6 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range) + * 7 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (full range) + * 8 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range) + * 9 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range, 10 bit) + * 10 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range, 10 bit) + * 11 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range, 10 bit) + * 14 = Identity (biased) + * 15 = Identity + */ + DRM_PVR_STATIC_DATA_AREA_YUV_CSC, +}; + +/** + * struct drm_pvr_static_data_area - Container holding information about a + * single static data area. + * + * This will always be fetched as an array. + */ +struct drm_pvr_static_data_area { + /** + * @area_usage: Usage of static data area. + * See &enum drm_pvr_static_data_area_usage. + */ + __u16 area_usage; + + /** + * @location_heap_id: Array index of the heap where this static data + * area is located. This array is fetched using + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + */ + __u16 location_heap_id; + + /** @size: Size of static data area. Not present if set to zero. */ + __u32 size; + + /** @offset: Offset of static data area from start of heap. */ + __u64 offset; +}; + +/** + * struct drm_pvr_dev_query_static_data_areas - Container used to fetch + * information about the static data areas in heaps supported by the device + * driver. + * + * Please note all driver-supported static data areas will be returned up to + * &static_data_areas.count. Some will not be present for all devices, which + * will be indicated by &struct drm_pvr_static_data_area.size being set to zero. + * + * Further, some heaps will not be present either. See &struct + * drm_pvr_dev_query_heap_info. + * + * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set + * to %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET. + */ +struct drm_pvr_dev_query_static_data_areas { + /** + * @static_data_areas: Array of &struct drm_pvr_static_data_area. If + * the pointer is NULL, the count and stride will be updated with those + * known to the driver version, to facilitate allocation by the caller. + */ + struct drm_pvr_obj_array static_data_areas; +}; + +/** + * enum drm_pvr_dev_query - For use with &drm_pvr_ioctl_dev_query_args.type to + * indicate the type of the receiving container. + * + * Append only.
Do not reorder. + */ +enum drm_pvr_dev_query { + /** + * @DRM_PVR_DEV_QUERY_GPU_INFO_GET: The dev query args contain a pointer + * to &struct drm_pvr_dev_query_gpu_info. + */ + DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0, + + /** + * @DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: The dev query args contain a + * pointer to &struct drm_pvr_dev_query_runtime_info. + */ + DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET, + + /** + * @DRM_PVR_DEV_QUERY_QUIRKS_GET: The dev query args contain a pointer + * to &struct drm_pvr_dev_query_quirks. + */ + DRM_PVR_DEV_QUERY_QUIRKS_GET, + + /** + * @DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: The dev query args contain a + * pointer to &struct drm_pvr_dev_query_enhancements. + */ + DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET, + + /** + * @DRM_PVR_DEV_QUERY_HEAP_INFO_GET: The dev query args contain a + * pointer to &struct drm_pvr_dev_query_heap_info. + */ + DRM_PVR_DEV_QUERY_HEAP_INFO_GET, + + /** + * @DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: The dev query args contain + * a pointer to &struct drm_pvr_dev_query_static_data_areas. + */ + DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET, +}; + +/** + * struct drm_pvr_ioctl_dev_query_args - Arguments for %DRM_IOCTL_PVR_DEV_QUERY. + */ +struct drm_pvr_ioctl_dev_query_args { + /** + * @type: Type of query and output struct. See &enum drm_pvr_dev_query. + */ + __u32 type; + + /** + * @size: Size of the receiving struct, see @type. + * + * After a successful call this will be updated to the written byte + * length. + * Can also be used to get the minimum byte length (see @pointer). + * This allows additional fields to be appended to the structs in + * the future. + */ + __u32 size; + + /** + * @pointer: Pointer to struct @type. + * + * Must be large enough to contain @size bytes. + * If the pointer is NULL, the expected size will be returned in the @size + * field, but no other data will be written. + */ + __u64 pointer; +}; + +/** + * DOC: PowerVR IOCTL CREATE_BO interface + */ + +/** + * DOC: Flags for CREATE_BO + * + * We use "device" to refer to the GPU here because of the ambiguity between CPU and GPU in some + * fonts. + * + * Device mapping options + * :DRM_PVR_BO_BYPASS_DEVICE_CACHE: Specify that device accesses to this memory will bypass the + * cache. This is used for buffers that will either be regularly updated by the CPU (eg free + * lists) or will be accessed only once and therefore aren't worth caching (eg partial render + * buffers). + * By default, the device flushes its memory caches after every job, so this is not normally + * required for coherency. + * :DRM_PVR_BO_PM_FW_PROTECT: Specify that only the Parameter Manager (PM) and/or firmware + * processor should be allowed to access this memory when mapped to the device. It is not + * valid to specify this flag with DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS. + * + * CPU mapping options + * :DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS: Allow userspace to map and access the contents of this + * memory. It is not valid to specify this flag with DRM_PVR_BO_PM_FW_PROTECT. + */ +#define DRM_PVR_BO_BYPASS_DEVICE_CACHE _BITULL(0) +#define DRM_PVR_BO_PM_FW_PROTECT _BITULL(1) +#define DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS _BITULL(2) +/* Bits 3..63 are reserved. */ + +#define DRM_PVR_BO_FLAGS_MASK (DRM_PVR_BO_BYPASS_DEVICE_CACHE | DRM_PVR_BO_PM_FW_PROTECT | \ + DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS) + +/** + * struct drm_pvr_ioctl_create_bo_args - Arguments for %DRM_IOCTL_PVR_CREATE_BO + */ +struct drm_pvr_ioctl_create_bo_args { + /** + * @size: [IN] Size of buffer object to create. This must be page size + * aligned.
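For the fixed-size containers, @size plus a NULL @pointer give the same probe-then-fetch pattern. A sketch under the same assumptions as the earlier examples:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/pvr_drm.h>	/* assumed install path */

/* Query GPU info: first ask the kernel for the expected byte length,
 * then fetch no more than the caller's struct can hold.
 */
static int query_gpu_info(int fd, struct drm_pvr_dev_query_gpu_info *info)
{
	struct drm_pvr_ioctl_dev_query_args args = {
		.type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
		.size = 0,
		.pointer = 0,	/* NULL: only @size is written back */
	};

	if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args))
		return -1;
	if (args.size > sizeof(*info))
		args.size = sizeof(*info);
	args.pointer = (__u64)(uintptr_t)info;
	return ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
}
```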
+ */ + __u64 size; + + /** + * @handle: [OUT] GEM handle of the new buffer object for use in + * userspace. + */ + __u32 handle; + + /** @_padding_c: Reserved. This field must be zeroed. */ + __u32 _padding_c; + + /** + * @flags: [IN] Options which will affect the behaviour of this + * creation operation and future mapping operations on the created + * object. This field must be a valid combination of ``DRM_PVR_BO_*`` + * values, with all bits marked as reserved set to zero. + */ + __u64 flags; +}; + +/** + * DOC: PowerVR IOCTL GET_BO_MMAP_OFFSET interface + */ + +/** + * struct drm_pvr_ioctl_get_bo_mmap_offset_args - Arguments for + * %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET + * + * As with other DRM drivers, the "mmap" IOCTL doesn't actually map any memory. + * Instead, it allocates a fake offset which refers to the specified buffer + * object. This offset can be used with a real mmap call on the DRM device + * itself. + */ +struct drm_pvr_ioctl_get_bo_mmap_offset_args { + /** @handle: [IN] GEM handle of the buffer object to be mapped. */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; + + /** @offset: [OUT] Fake offset to use in the real mmap call. */ + __u64 offset; +}; + +/** + * DOC: PowerVR IOCTL CREATE_VM_CONTEXT and DESTROY_VM_CONTEXT interfaces + */ + +/** + * struct drm_pvr_ioctl_create_vm_context_args - Arguments for + * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT + */ +struct drm_pvr_ioctl_create_vm_context_args { + /** @handle: [OUT] Handle for new VM context. */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * struct drm_pvr_ioctl_destroy_vm_context_args - Arguments for + * %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT + */ +struct drm_pvr_ioctl_destroy_vm_context_args { + /** + * @handle: [IN] Handle for VM context to be destroyed. + */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * DOC: PowerVR IOCTL VM_MAP and VM_UNMAP interfaces + * + * The VM UAPI allows userspace to create buffer object mappings in GPU virtual address space. + * + * The client is responsible for managing GPU address space. It should allocate mappings within + * the heaps returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + * + * %DRM_IOCTL_PVR_VM_MAP creates a new mapping. The client provides the target virtual address for + * the mapping. Size and offset within the mapped buffer object can be specified, so the client can + * partially map a buffer. + * + * %DRM_IOCTL_PVR_VM_UNMAP removes a mapping. The entire mapping will be removed from GPU address + * space only if the size of the mapping matches that known to the driver. + */ + +/** + * struct drm_pvr_ioctl_vm_map_args - Arguments for %DRM_IOCTL_PVR_VM_MAP. + */ +struct drm_pvr_ioctl_vm_map_args { + /** + * @vm_context_handle: [IN] Handle for VM context for this mapping to + * exist in. + */ + __u32 vm_context_handle; + + /** @flags: [IN] Flags which affect this mapping. Currently always 0. */ + __u32 flags; + + /** + * @device_addr: [IN] Requested device-virtual address for the mapping. + * This must be non-zero and aligned to the device page size for the + * heap containing the requested address. It is an error to specify an + * address which is not contained within one of the heaps returned by + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET. + */ + __u64 device_addr; + + /** + * @handle: [IN] Handle of the target buffer object. This must be a + * valid handle returned by %DRM_IOCTL_PVR_CREATE_BO.
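Putting CREATE_BO and GET_BO_MMAP_OFFSET together, the CPU-mapping flow looks roughly like this (a sketch; error paths abbreviated, install path assumed):

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/pvr_drm.h>

/* Create a CPU-accessible BO and map it. The fake offset returned by
 * GET_BO_MMAP_OFFSET is consumed by an ordinary mmap() on the DRM fd.
 */
static void *create_and_map_bo(int fd, __u64 size, __u32 *handle)
{
	struct drm_pvr_ioctl_create_bo_args create = {
		.size = size,	/* must be page-size aligned */
		.flags = DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS,
	};
	struct drm_pvr_ioctl_get_bo_mmap_offset_args off = { .handle = 0 };

	if (ioctl(fd, DRM_IOCTL_PVR_CREATE_BO, &create))
		return MAP_FAILED;
	off.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &off))
		return MAP_FAILED;

	*handle = create.handle;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, off.offset);
}
```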
+ */ + __u32 handle; + + /** @_padding_14: Reserved. This field must be zeroed. */ + __u32 _padding_14; + + /** + * @offset: [IN] Offset into the target bo from which to begin the + * mapping. + */ + __u64 offset; + + /** + * @size: [IN] Size of the requested mapping. Must be aligned to + * the device page size for the heap containing the requested address, + * as well as the host page size. When added to @device_addr, the + * result must not overflow the heap which contains @device_addr (i.e. + * the range specified by @device_addr and @size must be completely + * contained within a single heap specified by + * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET). + */ + __u64 size; +}; + +/** + * struct drm_pvr_ioctl_vm_unmap_args - Arguments for %DRM_IOCTL_PVR_VM_UNMAP. + */ +struct drm_pvr_ioctl_vm_unmap_args { + /** + * @vm_context_handle: [IN] Handle for VM context that this mapping + * exists in. + */ + __u32 vm_context_handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; + + /** + * @device_addr: [IN] Device-virtual address at the start of the target + * mapping. This must be non-zero. + */ + __u64 device_addr; + + /** + * @size: Size in bytes of the target mapping. This must be non-zero. + */ + __u64 size; +}; + +/** + * DOC: PowerVR IOCTL CREATE_CONTEXT and DESTROY_CONTEXT interfaces + */ + +/** + * enum drm_pvr_ctx_priority - Arguments for + * &drm_pvr_ioctl_create_context_args.priority + */ +enum drm_pvr_ctx_priority { + /** @DRM_PVR_CTX_PRIORITY_LOW: Priority below normal. */ + DRM_PVR_CTX_PRIORITY_LOW = -512, + + /** @DRM_PVR_CTX_PRIORITY_NORMAL: Normal priority. */ + DRM_PVR_CTX_PRIORITY_NORMAL = 0, + + /** + * @DRM_PVR_CTX_PRIORITY_HIGH: Priority above normal. + * Note this requires ``CAP_SYS_NICE`` or ``DRM_MASTER``. + */ + DRM_PVR_CTX_PRIORITY_HIGH = 512, +}; + +/** + * enum drm_pvr_ctx_type - Arguments for + * &struct drm_pvr_ioctl_create_context_args.type + */ +enum drm_pvr_ctx_type { + /** + * @DRM_PVR_CTX_TYPE_RENDER: Render context. + */ + DRM_PVR_CTX_TYPE_RENDER = 0, + + /** + * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context. + */ + DRM_PVR_CTX_TYPE_COMPUTE, + + /** + * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data + * master. + */ + DRM_PVR_CTX_TYPE_TRANSFER_FRAG, +}; + +/** + * struct drm_pvr_ioctl_create_context_args - Arguments for + * %DRM_IOCTL_PVR_CREATE_CONTEXT + */ +struct drm_pvr_ioctl_create_context_args { + /** + * @type: [IN] Type of context to create. + * + * This must be one of the values defined by &enum drm_pvr_ctx_type. + */ + __u32 type; + + /** @flags: [IN] Flags for context. */ + __u32 flags; + + /** + * @priority: [IN] Priority of new context. + * + * This must be one of the values defined by &enum drm_pvr_ctx_priority. + */ + __s32 priority; + + /** @handle: [OUT] Handle for new context. */ + __u32 handle; + + /** + * @static_context_state: [IN] Pointer to static context state stream. + */ + __u64 static_context_state; + + /** + * @static_context_state_len: [IN] Length of static context state, in bytes. + */ + __u32 static_context_state_len; + + /** + * @vm_context_handle: [IN] Handle for VM context that this context is + * associated with. + */ + __u32 vm_context_handle; + + /** + * @callstack_addr: [IN] Address for initial call stack pointer. Only valid + * if @type is %DRM_PVR_CTX_TYPE_RENDER, otherwise must be 0. 
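A mapping call then ties a VM context, a BO and a heap address together. A hedged sketch of a whole-BO map:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/pvr_drm.h>	/* assumed install path */

/* Map a whole BO at a caller-chosen device-virtual address inside one of
 * the heaps reported by DRM_PVR_DEV_QUERY_HEAP_INFO_GET. Both dev_addr
 * and size must respect that heap's page size.
 */
static int map_bo(int fd, __u32 vm_ctx, __u32 bo, __u64 dev_addr, __u64 size)
{
	struct drm_pvr_ioctl_vm_map_args map = {
		.vm_context_handle = vm_ctx,
		.flags = 0,		/* currently always 0 */
		.device_addr = dev_addr,
		.handle = bo,
		.offset = 0,		/* map from the start of the BO */
		.size = size,
	};

	return ioctl(fd, DRM_IOCTL_PVR_VM_MAP, &map);
}
```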
+ */ + __u64 callstack_addr; +}; + +/** + * struct drm_pvr_ioctl_destroy_context_args - Arguments for + * %DRM_IOCTL_PVR_DESTROY_CONTEXT + */ +struct drm_pvr_ioctl_destroy_context_args { + /** + * @handle: [IN] Handle for context to be destroyed. + */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * DOC: PowerVR IOCTL CREATE_FREE_LIST and DESTROY_FREE_LIST interfaces + */ + +/** + * struct drm_pvr_ioctl_create_free_list_args - Arguments for + * %DRM_IOCTL_PVR_CREATE_FREE_LIST + * + * Free list arguments have the following constraints: + * + * - @max_num_pages must be greater than zero. + * - @grow_threshold must be between 0 and 100. + * - @grow_num_pages must be less than or equal to @max_num_pages. + * - @initial_num_pages, @max_num_pages and @grow_num_pages must be multiples + * of 4. + * - When @grow_num_pages is 0, @initial_num_pages must be equal to + * @max_num_pages. + * - When @grow_num_pages is non-zero, @initial_num_pages must be less than + * @max_num_pages. + */ +struct drm_pvr_ioctl_create_free_list_args { + /** + * @free_list_gpu_addr: [IN] Address of GPU mapping of buffer object + * containing memory to be used by free list. + * + * The mapped region of the buffer object must be at least + * @max_num_pages * ``sizeof(__u32)``. + * + * The buffer object must have been created with + * %DRM_PVR_BO_PM_FW_PROTECT set and + * %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS not set. + */ + __u64 free_list_gpu_addr; + + /** @initial_num_pages: [IN] Pages initially allocated to free list. */ + __u32 initial_num_pages; + + /** @max_num_pages: [IN] Maximum number of pages in free list. */ + __u32 max_num_pages; + + /** @grow_num_pages: [IN] Pages to grow free list by per request. */ + __u32 grow_num_pages; + + /** + * @grow_threshold: [IN] Percentage of free list memory used that should + * trigger a new grow request. + */ + __u32 grow_threshold; + + /** + * @vm_context_handle: [IN] Handle for VM context that the free list buffer + * object is mapped in. + */ + __u32 vm_context_handle; + + /** + * @handle: [OUT] Handle for created free list. + */ + __u32 handle; +}; + +/** + * struct drm_pvr_ioctl_destroy_free_list_args - Arguments for + * %DRM_IOCTL_PVR_DESTROY_FREE_LIST + */ +struct drm_pvr_ioctl_destroy_free_list_args { + /** + * @handle: [IN] Handle for free list to be destroyed. + */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * DOC: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces + */ + +/** + * struct drm_pvr_create_hwrt_geom_data_args - Geometry data arguments used for + * &struct drm_pvr_ioctl_create_hwrt_dataset_args.geom_data_args. + */ +struct drm_pvr_create_hwrt_geom_data_args { + /** @tpc_dev_addr: [IN] Tail pointer cache GPU virtual address. */ + __u64 tpc_dev_addr; + + /** @tpc_size: [IN] Size of TPC, in bytes. */ + __u32 tpc_size; + + /** @tpc_stride: [IN] Stride between layers in TPC, in pages. */ + __u32 tpc_stride; + + /** @vheap_table_dev_addr: [IN] VHEAP table GPU virtual address. */ + __u64 vheap_table_dev_addr; + + /** @rtc_dev_addr: [IN] Render Target Cache virtual address. */ + __u64 rtc_dev_addr; +}; + +/** + * struct drm_pvr_create_hwrt_rt_data_args - Render target arguments used for + * &struct drm_pvr_ioctl_create_hwrt_dataset_args.rt_data_args. + */ +struct drm_pvr_create_hwrt_rt_data_args { + /** @pm_mlist_dev_addr: [IN] PM MLIST GPU virtual address.
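The constraint list above translates directly into a client-side pre-check. A purely illustrative sketch (the kernel validates these anyway):

```c
#include <stdbool.h>
#include <drm/pvr_drm.h>	/* assumed install path */

/* Mirror of the documented free list constraints, usable before issuing
 * DRM_IOCTL_PVR_CREATE_FREE_LIST. All three page counts must be
 * multiples of 4, which for a power of two reduces to a mask test.
 */
static bool free_list_args_valid(const struct drm_pvr_ioctl_create_free_list_args *a)
{
	if (a->max_num_pages == 0 || a->grow_threshold > 100)
		return false;
	if ((a->initial_num_pages | a->max_num_pages | a->grow_num_pages) & 3)
		return false;
	if (a->grow_num_pages > a->max_num_pages)
		return false;
	if (a->grow_num_pages == 0)
		return a->initial_num_pages == a->max_num_pages;
	return a->initial_num_pages < a->max_num_pages;
}
```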
*/ + __u64 pm_mlist_dev_addr; + + /** @macrotile_array_dev_addr: [IN] Macrotile array GPU virtual address. */ + __u64 macrotile_array_dev_addr; + + /** @region_header_dev_addr: [IN] Region header array GPU virtual address. */ + __u64 region_header_dev_addr; +}; + +#define PVR_DRM_HWRT_FREE_LIST_LOCAL 0 +#define PVR_DRM_HWRT_FREE_LIST_GLOBAL 1U + +/** + * struct drm_pvr_ioctl_create_hwrt_dataset_args - Arguments for + * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET + */ +struct drm_pvr_ioctl_create_hwrt_dataset_args { + /** @geom_data_args: [IN] Geometry data arguments. */ + struct drm_pvr_create_hwrt_geom_data_args geom_data_args; + + /** + * @rt_data_args: [IN] Array of render target arguments. + * + * Each entry in this array represents a render target in a double-buffered + * setup. + */ + struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2]; + + /** + * @free_list_handles: [IN] Array of free list handles. + * + * free_list_handles[PVR_DRM_HWRT_FREE_LIST_LOCAL] must have an initial + * size of at least that reported by + * &drm_pvr_dev_query_runtime_info.free_list_min_pages. + */ + __u32 free_list_handles[2]; + + /** @width: [IN] Width in pixels. */ + __u32 width; + + /** @height: [IN] Height in pixels. */ + __u32 height; + + /** @samples: [IN] Number of samples. */ + __u32 samples; + + /** @layers: [IN] Number of layers. */ + __u32 layers; + + /** @isp_merge_lower_x: [IN] Lower X coefficient for triangle merging. */ + __u32 isp_merge_lower_x; + + /** @isp_merge_lower_y: [IN] Lower Y coefficient for triangle merging. */ + __u32 isp_merge_lower_y; + + /** @isp_merge_scale_x: [IN] Scale X coefficient for triangle merging. */ + __u32 isp_merge_scale_x; + + /** @isp_merge_scale_y: [IN] Scale Y coefficient for triangle merging. */ + __u32 isp_merge_scale_y; + + /** @isp_merge_upper_x: [IN] Upper X coefficient for triangle merging. */ + __u32 isp_merge_upper_x; + + /** @isp_merge_upper_y: [IN] Upper Y coefficient for triangle merging. */ + __u32 isp_merge_upper_y; + + /** + * @region_header_size: [IN] Size of region header array. This common field is used by + * both render targets in this data set. + * + * The units for this field differ depending on what version of the simple internal + * parameter format the device uses. If format 2 is in use then this is interpreted as the + * number of region headers. For other formats it is interpreted as the size in dwords. + */ + __u32 region_header_size; + + /** + * @handle: [OUT] Handle for created HWRT dataset. + */ + __u32 handle; +}; + +/** + * struct drm_pvr_ioctl_destroy_hwrt_dataset_args - Arguments for + * %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET + */ +struct drm_pvr_ioctl_destroy_hwrt_dataset_args { + /** + * @handle: [IN] Handle for HWRT dataset to be destroyed. + */ + __u32 handle; + + /** @_padding_4: Reserved. This field must be zeroed. */ + __u32 _padding_4; +}; + +/** + * DOC: PowerVR IOCTL SUBMIT_JOBS interface + */ + +/** + * DOC: Flags for the drm_pvr_sync_op object. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK + * + * Handle type mask for the drm_pvr_sync_op::flags field. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ + * + * Indicates the handle passed in drm_pvr_sync_op::handle is a syncobj handle. + * This is the default type. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ + * + * Indicates the handle passed in drm_pvr_sync_op::handle is a timeline syncobj handle. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_SIGNAL + * + * Signal operation requested.
The out-fence bound to the job will be attached to + * the syncobj whose handle is passed in drm_pvr_sync_op::handle. + * + * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_WAIT + * + * Wait operation requested. The job will wait for this particular syncobj or syncobj + * point to be signaled before being started. + * This is the default operation. + */ +#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf +#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0 +#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1 +#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31) +#define DRM_PVR_SYNC_OP_FLAG_WAIT 0 + +#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | \ + DRM_PVR_SYNC_OP_FLAG_SIGNAL) + +/** + * struct drm_pvr_sync_op - Object describing a sync operation + */ +struct drm_pvr_sync_op { + /** @handle: Handle of sync object. */ + __u32 handle; + + /** @flags: Combination of ``DRM_PVR_SYNC_OP_FLAG_`` flags. */ + __u32 flags; + + /** @value: Timeline value for this drm_syncobj. MBZ for a binary syncobj. */ + __u64 value; +}; + +/** + * DOC: Flags for SUBMIT_JOB ioctl geometry command. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST + * + * Indicates if this is the first command to be issued for a render. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST + * + * Indicates if this is the last command to be issued for a render. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE + * + * Forces the job to use a single core in a multi-core device. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK + * + * Logical OR of all the geometry cmd flags. + */ +#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0) +#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1) +#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2) +#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK \ + (DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST | \ + DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST | \ + DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE) + +/** + * DOC: Flags for SUBMIT_JOB ioctl fragment command. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE + * + * Use a single core in a multi-core setup. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER + * + * Indicates whether a depth buffer is present. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER + * + * Indicates whether a stencil buffer is present. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP + * + * Disallow compute overlapped with this render. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS + * + * Indicates whether this render produces visibility results. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER + * + * Indicates whether partial renders write to a scratch buffer instead of + * the final surface. It also forces the full-screen copy expected to be + * present on the last render after all partial renders have completed. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE + * + * Disable pixel merging for this render. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK + * + * Logical OR of all the fragment cmd flags.
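In practice a job carries a mix of waits and signals. A sketch of one binary-syncobj wait plus one timeline-syncobj signal:

```c
#include <drm/pvr_drm.h>	/* assumed install path */

/* Fill two drm_pvr_sync_op entries for a job's sync_ops array: wait on
 * a binary syncobj, then signal a timeline syncobj at `point`.
 */
static void fill_sync_ops(struct drm_pvr_sync_op ops[2], __u32 wait_handle,
			  __u32 timeline_handle, __u64 point)
{
	ops[0] = (struct drm_pvr_sync_op){
		.handle = wait_handle,
		.flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
			 DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
		.value = 0,	/* MBZ for a binary syncobj */
	};
	ops[1] = (struct drm_pvr_sync_op){
		.handle = timeline_handle,
		.flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
			 DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ,
		.value = point,
	};
}
```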
+ */ +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER _BITULL(4) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER _BITULL(6) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE _BITULL(7) +#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK \ + (DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER | \ + DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE) + +/** + * DOC: Flags for SUBMIT_JOB ioctl compute command. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP + * + * Disallow other jobs overlapped with this compute. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE + * + * Forces the job to use a single core in a multi-core device. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK + * + * Logical OR of all the compute cmd flags. + */ +#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0) +#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1) +#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK \ + (DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | \ + DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE) + +/** + * DOC: Flags for SUBMIT_JOB ioctl transfer command. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE + * + * Forces the job to use a single core in a multi-core device. + * + * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK + * + * Logical OR of all the transfer cmd flags. + */ +#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0) + +#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK \ + DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE + +/** + * enum drm_pvr_job_type - Arguments for &struct drm_pvr_job.type + */ +enum drm_pvr_job_type { + /** @DRM_PVR_JOB_TYPE_GEOMETRY: Job type is geometry. */ + DRM_PVR_JOB_TYPE_GEOMETRY = 0, + + /** @DRM_PVR_JOB_TYPE_FRAGMENT: Job type is fragment. */ + DRM_PVR_JOB_TYPE_FRAGMENT, + + /** @DRM_PVR_JOB_TYPE_COMPUTE: Job type is compute. */ + DRM_PVR_JOB_TYPE_COMPUTE, + + /** @DRM_PVR_JOB_TYPE_TRANSFER_FRAG: Job type is a fragment transfer. */ + DRM_PVR_JOB_TYPE_TRANSFER_FRAG, +}; + +/** + * struct drm_pvr_hwrt_data_ref - Reference HWRT data + */ +struct drm_pvr_hwrt_data_ref { + /** @set_handle: HWRT data set handle. */ + __u32 set_handle; + + /** @data_index: Index of the HWRT data inside the data set. */ + __u32 data_index; +}; + +/** + * struct drm_pvr_job - Job arguments passed to the %DRM_IOCTL_PVR_SUBMIT_JOBS ioctl + */ +struct drm_pvr_job { + /** + * @type: [IN] Type of job being submitted. + * + * This must be one of the values defined by &enum drm_pvr_job_type. + */ + __u32 type; + + /** + * @context_handle: [IN] Context handle. + * + * When @type is %DRM_PVR_JOB_TYPE_GEOMETRY, %DRM_PVR_JOB_TYPE_FRAGMENT, + * %DRM_PVR_JOB_TYPE_COMPUTE or %DRM_PVR_JOB_TYPE_TRANSFER_FRAG, this must + * be a valid handle returned by %DRM_IOCTL_PVR_CREATE_CONTEXT. The type + * of context must be compatible with the type of job being submitted.
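Putting the pieces of the SUBMIT_JOBS interface together, a hedged end-to-end sketch of submitting a single geometry job (handles, command stream and sync ops are assumptions supplied by the caller):

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/pvr_drm.h>	/* assumed install path */

static int submit_one_geom_job(int fd, __u32 ctx, __u64 stream, __u32 stream_len,
			       struct drm_pvr_sync_op *syncs, __u32 nsyncs,
			       struct drm_pvr_hwrt_data_ref hwrt)
{
	struct drm_pvr_job job = {
		.type = DRM_PVR_JOB_TYPE_GEOMETRY,
		.context_handle = ctx,
		.flags = DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST |
			 DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST,
		.cmd_stream_len = stream_len,
		.cmd_stream = stream,	/* must be u64-aligned */
		.sync_ops = DRM_PVR_OBJ_ARRAY(nsyncs, syncs),
		.hwrt = hwrt,		/* geometry jobs reference HWRT data */
	};
	struct drm_pvr_ioctl_submit_jobs_args args = {
		.jobs = DRM_PVR_OBJ_ARRAY(1, &job),
	};

	return ioctl(fd, DRM_IOCTL_PVR_SUBMIT_JOBS, &args);
}
```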
+ */ + __u32 context_handle; + + /** + * @flags: [IN] Flags for command. + * + * These are job-dependent. See the ``DRM_PVR_SUBMIT_JOB_*`` flags. + */ + __u32 flags; + + /** + * @cmd_stream_len: [IN] Length of command stream, in bytes. + */ + __u32 cmd_stream_len; + + /** + * @cmd_stream: [IN] Pointer to command stream for command. + * + * The command stream must be u64-aligned. + */ + __u64 cmd_stream; + + /** @sync_ops: [IN] Sync operations for this job. */ + struct drm_pvr_obj_array sync_ops; + + /** + * @hwrt: [IN] HWRT data used by render jobs (geometry or fragment). + * + * Must be zero for non-render jobs. + */ + struct drm_pvr_hwrt_data_ref hwrt; +}; + +/** + * struct drm_pvr_ioctl_submit_jobs_args - Arguments for %DRM_IOCTL_PVR_SUBMIT_JOBS + * + * If the syscall returns an error, it is important to check the value of + * @jobs.count. This indicates the index into @jobs.array where the + * error occurred. + */ +struct drm_pvr_ioctl_submit_jobs_args { + /** @jobs: [IN] Array of jobs to submit. */ + struct drm_pvr_obj_array jobs; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* PVR_DRM_UAPI_H */ diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h index 43ac5d864512..9dab32316aee 100644 --- a/include/uapi/drm/qaic_accel.h +++ b/include/uapi/drm/qaic_accel.h @@ -287,8 +287,9 @@ struct qaic_execute_entry { * struct qaic_partial_execute_entry - Defines a BO to resize and submit. * @handle: In. GEM handle of the BO to commit to the device. * @dir: In. Direction of data. 1 = to device, 2 = from device. - * @resize: In. New size of the BO. Must be <= the original BO size. 0 is - * short for no resize. + * @resize: In. New size of the BO. Must be <= the original BO size. + * A @resize of 0 means no DMA transfer is + * involved.
*/ struct qaic_partial_execute_entry { __u32 handle; diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 3dfc0af8756a..dce1835eced4 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -41,6 +41,7 @@ extern "C" { #define DRM_V3D_PERFMON_CREATE 0x08 #define DRM_V3D_PERFMON_DESTROY 0x09 #define DRM_V3D_PERFMON_GET_VALUES 0x0a +#define DRM_V3D_SUBMIT_CPU 0x0b #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -56,6 +57,7 @@ extern "C" { struct drm_v3d_perfmon_destroy) #define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \ struct drm_v3d_perfmon_get_values) +#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu) #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 #define DRM_V3D_SUBMIT_EXTENSION 0x02 @@ -69,7 +71,13 @@ extern "C" { struct drm_v3d_extension { __u64 next; __u32 id; -#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 +#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 +#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02 +#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03 +#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04 +#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05 +#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06 +#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07 __u32 flags; /* mbz */ }; @@ -93,6 +101,7 @@ enum v3d_queue { V3D_TFU, V3D_CSD, V3D_CACHE_CLEAN, + V3D_CPU, }; /** @@ -276,6 +285,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, DRM_V3D_PARAM_SUPPORTS_PERFMON, DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT, + DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, }; struct drm_v3d_get_param { @@ -319,6 +329,11 @@ struct drm_v3d_submit_tfu { /* Pointer to an array of ioctl extensions*/ __u64 extensions; + + struct { + __u32 ioc; + __u32 pad; + } v71; }; /* Submits a compute shader for dispatch. This job will block on any @@ -356,6 +371,234 @@ struct drm_v3d_submit_csd { __u32 pad; }; +/** + * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an + * indirect CSD + * + * When an extension of DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it + * points to this extension to define an indirect CSD submission. It creates a + * CPU job linked to a CSD job. The CPU job waits for the indirect CSD + * dependencies and, once they are signaled, it updates the CSD job config + * before allowing the CSD job execution. + */ +struct drm_v3d_indirect_csd { + struct drm_v3d_extension base; + + /* Indirect CSD */ + struct drm_v3d_submit_csd submit; + + /* Handle of the indirect BO, which should also be attached to the + * indirect CSD. + */ + __u32 indirect; + + /* Offset within the BO where the workgroup counts are stored */ + __u32 offset; + + /* Workgroup size */ + __u32 wg_size; + + /* Indices of the uniforms with the workgroup dispatch counts + * in the uniform stream. If the uniform rewrite is not needed, + * the offset must be 0xffffffff. + */ + __u32 wg_uniform_offsets[3]; +}; + +/** + * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate + * a timestamp query + * + * When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to + * this extension to define a timestamp query submission. This CPU job will + * calculate the timestamp query and update the query value within the + * timestamp BO.
Moreover, it will signal the timestamp syncobj to indicate + * query availability. + */ +struct drm_v3d_timestamp_query { + struct drm_v3d_extension base; + + /* Array of queries' offsets within the timestamp BO for their value */ + __u64 offsets; + + /* Array of timestamp syncobjs to indicate their availability */ + __u64 syncs; + + /* Number of queries */ + __u32 count; + + /* mbz */ + __u32 pad; +}; + +/** + * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to + * reset timestamp queries + * + * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it + * points to this extension to define a reset timestamp submission. This CPU + * job will reset the timestamp queries based on the value offset of the first + * query. Moreover, it will reset the timestamp syncobj to reset query + * availability. + */ +struct drm_v3d_reset_timestamp_query { + struct drm_v3d_extension base; + + /* Array of timestamp syncobjs to indicate their availability */ + __u64 syncs; + + /* Offset of the first query within the timestamp BO for its value */ + __u32 offset; + + /* Number of queries */ + __u32 count; +}; + +/** + * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy + * query results to a buffer + * + * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it + * points to this extension to define a copy timestamp query submission. This + * CPU job will copy the timestamp query results to a BO with the offset + * and stride defined in the extension. + */ +struct drm_v3d_copy_timestamp_query { + struct drm_v3d_extension base; + + /* Define whether to write to the buffer using 64 or 32 bits */ + __u8 do_64bit; + + /* Define whether it can write to the buffer even if the query is not available */ + __u8 do_partial; + + /* Define whether it should write the availability bit to the buffer */ + __u8 availability_bit; + + /* mbz */ + __u8 pad; + + /* Offset of the buffer in the BO */ + __u32 offset; + + /* Stride of the buffer in the BO */ + __u32 stride; + + /* Number of queries */ + __u32 count; + + /* Array of queries' offsets within the timestamp BO for their value */ + __u64 offsets; + + /* Array of timestamp syncobjs to indicate their availability */ + __u64 syncs; +}; + +/** + * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to + * reset performance queries + * + * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it + * points to this extension to define a reset performance submission. This CPU + * job will reset the performance queries by resetting the values of the + * performance monitors. Moreover, it will reset the syncobj to reset query + * availability. + */ +struct drm_v3d_reset_performance_query { + struct drm_v3d_extension base; + + /* Array of performance query syncobjs to indicate their availability */ + __u64 syncs; + + /* Number of queries */ + __u32 count; + + /* Number of performance monitors */ + __u32 nperfmons; + + /* Array of u64 user-pointers that point to an array of kperfmon_ids */ + __u64 kperfmon_ids; +}; + +/** + * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy + * performance query results to a buffer + * + * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it + * points to this extension to define a copy performance query submission. This + * CPU job will copy the performance query results to a BO with the offset + * and stride defined in the extension.
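To see how these extensions are consumed, a hedged sketch of submitting one CPU job that records a single timestamp query; the handle values, and the assumption that the offsets and syncs arrays hold u32 entries, are illustrative only:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>	/* assumed install path */

/* One CPU job recording one timestamp query. `bo` is the timestamp BO
 * and `syncobj` signals query availability.
 */
static int submit_timestamp_query(int fd, uint32_t bo, uint32_t syncobj)
{
	uint32_t offsets[1] = { 0 };	/* query value at BO offset 0 */
	uint32_t syncs[1] = { syncobj };
	struct drm_v3d_timestamp_query ext = {
		.base = { .id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY },
		.offsets = (uintptr_t)offsets,
		.syncs = (uintptr_t)syncs,
		.count = 1,
	};
	uint32_t bos[1] = { bo };
	struct drm_v3d_submit_cpu submit = {
		.bo_handles = (uintptr_t)bos,
		.bo_handle_count = 1,
		.extensions = (uintptr_t)&ext,
	};

	return ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);
}
```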
+ */ +struct drm_v3d_copy_performance_query { + struct drm_v3d_extension base; + + /* Define whether to write to the buffer using 64 or 32 bits */ + __u8 do_64bit; + + /* Define whether it can write to the buffer even if the query is not available */ + __u8 do_partial; + + /* Define whether it should write the availability bit to the buffer */ + __u8 availability_bit; + + /* mbz */ + __u8 pad; + + /* Offset of the buffer in the BO */ + __u32 offset; + + /* Stride of the buffer in the BO */ + __u32 stride; + + /* Number of performance monitors */ + __u32 nperfmons; + + /* Number of performance counters related to this query pool */ + __u32 ncounters; + + /* Number of queries */ + __u32 count; + + /* Array of performance query syncobjs to indicate their availability */ + __u64 syncs; + + /* Array of u64 user-pointers that point to an array of kperfmon_ids */ + __u64 kperfmon_ids; +}; + +struct drm_v3d_submit_cpu { + /* Pointer to a u32 array of the BOs that are referenced by the job. + * + * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO, + * which contains the workgroup counts. + * + * For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO, + * which will contain the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only + * one BO, which contains the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two + * BOs. The first is the BO where the timestamp queries will be written. + * The second is the BO that contains the timestamp. + * + * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no + * BOs. + * + * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one + * BO, where the performance queries will be written. + */ + __u64 bo_handles; + + /* Number of BO handles passed in (size is that times 4). */ + __u32 bo_handle_count; + + __u32 flags; + + /* Pointer to an array of ioctl extensions */ + __u64 extensions; +}; + enum { V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS, V3D_PERFCNT_FEP_VALID_PRIMS, diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index b1d0e56565bc..c2ce71987e9b 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -97,6 +97,7 @@ struct drm_virtgpu_execbuffer { #define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */ #define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */ #define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */ +#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */ struct drm_virtgpu_getparam { __u64 param; @@ -198,6 +199,7 @@ struct drm_virtgpu_resource_create_blob { #define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001 #define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002 #define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003 +#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004 struct drm_virtgpu_context_set_param { __u64 param; __u64 value; diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index c25fc9614594..d24e8e121507 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -219,6 +219,22 @@ */ #define BTRFS_METADATA_ITEM_KEY 169 +/* + * Special inline ref key which stores the id of the subvolume which originally + * created the extent. This subvolume owns the extent permanently from the + * perspective of simple quotas. Needed to know which subvolume to free quota + * usage from when the extent is deleted.
+ * + * Stored as an inline ref to avoid wasting space on a separate item on + * top of the existing extent item. However, unlike the other inline refs, + * there is only one owner ref per extent, rather than one per referencing + * root. + * + * Because of this, it goes at the front of the list of inline refs, and thus + * must have a lower type value than any other inline ref type (to satisfy the + * disk format rule that inline refs have non-decreasing type). + */ +#define BTRFS_EXTENT_OWNER_REF_KEY 172 + #define BTRFS_TREE_BLOCK_REF_KEY 176 #define BTRFS_EXTENT_DATA_REF_KEY 178 @@ -234,14 +250,6 @@ #define BTRFS_SHARED_DATA_REF_KEY 184 /* - * Special inline ref key which stores the id of the subvolume which originally - * created the extent. This subvolume owns the extent permanently from the - * perspective of simple quotas. Needed to know which subvolume to free quota - * usage from when the extent is deleted. - */ -#define BTRFS_EXTENT_OWNER_REF_KEY 188 - -/* * block groups give us hints into the extent allocation trees. Which * blocks are free etc etc */ diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index 6c80f96049bd..282e90aeb163 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -116,5 +116,8 @@ #define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to compare object identity and may not be usable to open_by_handle_at(2) */ +#if defined(__KERNEL__) +#define AT_GETATTR_NOSEC 0x80000000 +#endif #endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h index ff0a931833e2..ff1f38889dcf 100644 --- a/include/uapi/linux/sync_file.h +++ b/include/uapi/linux/sync_file.h @@ -76,6 +76,27 @@ struct sync_file_info { __u64 sync_fence_info; }; +/** + * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE - set a deadline hint on a fence + * @deadline_ns: absolute time of the deadline + * @pad: must be zero + * + * Allows userspace to set a deadline on a fence, see &dma_fence_set_deadline. + * + * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank). For + * example + * + * clock_gettime(CLOCK_MONOTONIC, &t); + * deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline + */ +struct sync_set_deadline { + __u64 deadline_ns; + /* Not strictly needed for alignment but gives some possibility + * for future extension: + */ + __u64 pad; +}; + #define SYNC_IOC_MAGIC '>' /* @@ -87,5 +108,6 @@ struct sync_file_info { #define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data) #define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info) +#define SYNC_IOC_SET_DEADLINE _IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline) #endif /* _UAPI_LINUX_SYNC_H */ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index f703afc7ad31..44f4dd2add18 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -166,6 +166,17 @@ struct virtio_pci_common_cfg { __le32 queue_used_hi; /* read-write */ }; +/* + * Warning: do not use sizeof() on this struct; use offsetofend() for the + * specific fields you need.
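The CLOCK_MONOTONIC recipe in the sync_set_deadline comment becomes a one-ioctl helper in userspace; a sketch:

```c
#include <stdint.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Set a deadline hint `ns_until_deadline` nanoseconds from now on a
 * sync_file fd, following the CLOCK_MONOTONIC recipe documented above.
 */
static int set_deadline_in(int fence_fd, uint64_t ns_until_deadline)
{
	struct timespec t;
	struct sync_set_deadline args = { .pad = 0 };

	clock_gettime(CLOCK_MONOTONIC, &t);
	args.deadline_ns = (uint64_t)t.tv_sec * 1000000000ULL +
			   (uint64_t)t.tv_nsec + ns_until_deadline;
	return ioctl(fence_fd, SYNC_IOC_SET_DEADLINE, &args);
}
```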
+ */ +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + + __le16 queue_notify_data; /* read-write */ + __le16 queue_reset; /* read-write */ +}; + /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ struct virtio_pci_cfg_cap { struct virtio_pci_cap cap; diff --git a/include/xen/events.h b/include/xen/events.h index 23932b0673dc..3b07409f8032 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -88,7 +88,6 @@ void xen_irq_resume(void); /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); -void xen_set_irq_pending(int irq); bool xen_test_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the @@ -101,8 +100,8 @@ void xen_poll_irq_timeout(int irq, u64 timeout); /* Determine the IRQ which is bound to an event channel */ unsigned int irq_from_evtchn(evtchn_port_t evtchn); -int irq_from_virq(unsigned int cpu, unsigned int virq); -evtchn_port_t evtchn_from_irq(unsigned irq); +int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq, + evtchn_port_t *evtchn); int xen_set_callback_via(uint64_t via); int xen_evtchn_do_upcall(void); @@ -122,9 +121,6 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, /* De-allocates the above mentioned physical interrupt. */ int xen_destroy_irq(int irq); -/* Return irq from pirq */ -int xen_irq_from_pirq(unsigned pirq); - /* Return the pirq allocated to the irq. */ int xen_pirq_from_irq(unsigned irq); |