Diffstat (limited to 'include')
30 files changed, 1169 insertions, 57 deletions
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 724c45e3e9a7..9be85b821aa6 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -22,6 +22,9 @@ #ifndef __AMD_ASIC_TYPE_H__ #define __AMD_ASIC_TYPE_H__ + +#include <linux/types.h> + /* * Supported ASIC types */ diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h index e0c105051246..9764d6eb5beb 100644 --- a/include/drm/bridge/samsung-dsim.h +++ b/include/drm/bridge/samsung-dsim.h @@ -11,9 +11,11 @@ #include <linux/regulator/consumer.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_of.h> +#include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +struct platform_device; struct samsung_dsim; #define DSIM_STATE_ENABLED BIT(0) diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index a62fcd051d4d..f3a28af7c1b6 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -422,7 +422,18 @@ struct drm_dp_aux { * @wait_hpd_asserted: wait for HPD to be asserted * * This is mainly useful for eDP panel drivers to wait for an eDP - * panel to finish powering on. This is an optional function. + * panel to finish powering on. It is optional for DP AUX controllers + * to implement this function. It is required for DP AUX endpoints + * (panel drivers) to call this function after powering up but before + * doing AUX transfers unless the DP AUX endpoint driver knows that + * we're not using the AUX controller's HPD. One example of the panel + * driver not needing to call this is if HPD is hooked up to a GPIO + * that the panel driver can read directly. + * + * If a DP AUX controller does not implement this function then it + * may still support eDP panels that use the AUX controller's built-in + * HPD signal by implementing a long wait for HPD in the transfer() + * callback, though this is deprecated. * * This function will efficiently wait for the HPD signal to be * asserted. The `wait_us` parameter that is passed in says that we @@ -722,7 +733,7 @@ static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel, #endif -#ifdef CONFIG_DRM_DP_CEC +#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CEC void drm_dp_cec_irq(struct drm_dp_aux *aux); void drm_dp_cec_register_connector(struct drm_dp_aux *aux, struct drm_connector *connector); diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 9b19d8bd520a..3ae88a383a41 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -83,7 +83,6 @@ struct drm_dp_mst_branch; * @passthrough_aux: parent aux to which DSC pass-through requests should be * sent, only set if DSC pass-through is possible. * @parent: branch device parent of this port - * @vcpi: Virtual Channel Payload info for this port. * @connector: DRM connector this port is connected to. Protected by * &drm_dp_mst_topology_mgr.base.lock. * @mgr: topology manager this port lives under. diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index d47458ecdac4..bc0e66f9c425 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -141,6 +141,13 @@ struct drm_client_buffer { /** * @gem: GEM object backing this buffer + * + * FIXME: The dependency on GEM here isn't required, we could + * convert the driver handle to a dma-buf instead and use the + * backend-agnostic dma-buf vmap support.
This would + * require that the handle2fd prime ioctl is reworked to pull the + * fd_install step out of the driver backend hooks, to make that + * final step optional for internal users. */ struct drm_gem_object *gem; @@ -159,6 +166,9 @@ struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); +int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, + struct iosys_map *map_copy); +void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer); int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct iosys_map *map); void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h index b225eeb30d05..1b4c98c2f838 100644 --- a/include/drm/drm_debugfs_crc.h +++ b/include/drm/drm_debugfs_crc.h @@ -22,13 +22,19 @@ #ifndef __DRM_DEBUGFS_CRC_H__ #define __DRM_DEBUGFS_CRC_H__ +#include <linux/spinlock_types.h> +#include <linux/types.h> +#include <linux/wait.h> + +struct drm_crtc; + #define DRM_MAX_CRC_NR 10 /** * struct drm_crtc_crc_entry - entry describing a frame's content * @has_frame_counter: whether the source was able to provide a frame number * @frame: number of the frame this CRC is about, if @has_frame_counter is true - * @crc: array of values that characterize the frame + * @crcs: array of values that characterize the frame */ struct drm_crtc_crc_entry { bool has_frame_counter; diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 7923bc00dc7a..6f65bbf655a1 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -312,6 +312,13 @@ struct edid { u8 checksum; } __packed; +/* EDID matching */ +struct drm_edid_ident { + /* ID encoded by drm_edid_encode_panel_id() */ + u32 panel_id; + const char *name; +}; + #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) /* Short Audio Descriptor */ @@ -410,7 +417,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, void *data); struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter); -u32 drm_edid_get_panel_id(struct i2c_adapter *adapter); +const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter); +u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid); +bool drm_edid_match(const struct drm_edid *drm_edid, + const struct drm_edid_ident *ident); struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter); struct edid *drm_edid_duplicate(const struct edid *edid); diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 7214101fd731..49172166a164 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -34,12 +34,6 @@ /** * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver - * @set_config: Initialize any encoder-specific modesetting parameters. - * The meaning of the @params parameter is implementation - * dependent. It will usually be a structure with DVO port - * data format settings or timings. It's not required for - * the new parameters to take effect until the next mode - * is set. 
* * Most of its members are analogous to the function pointers in * &drm_encoder_helper_funcs and they can optionally be used to @@ -48,41 +42,85 @@ * if the encoder is the currently selected one for the connector. */ struct drm_encoder_slave_funcs { + /** + * @set_config: Initialize any encoder-specific modesetting parameters. + * The meaning of the @params parameter is implementation dependent. It + * will usually be a structure with DVO port data format settings or + * timings. It's not required for the new parameters to take effect + * until the next mode is set. + */ void (*set_config)(struct drm_encoder *encoder, void *params); + /** + * @destroy: Analogous to &drm_encoder_funcs @destroy callback. + */ void (*destroy)(struct drm_encoder *encoder); + + /** + * @dpms: Analogous to &drm_encoder_helper_funcs @dpms callback. Wrapped + * by drm_i2c_encoder_dpms(). + */ void (*dpms)(struct drm_encoder *encoder, int mode); + + /** + * @save: Save state. Wrapped by drm_i2c_encoder_save(). + */ void (*save)(struct drm_encoder *encoder); + + /** + * @restore: Restore state. Wrapped by drm_i2c_encoder_restore(). + */ void (*restore)(struct drm_encoder *encoder); + + /** + * @mode_fixup: Analogous to &drm_encoder_helper_funcs @mode_fixup + * callback. Wrapped by drm_i2c_encoder_mode_fixup(). + */ bool (*mode_fixup)(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + + /** + * @mode_valid: Analogous to &drm_encoder_helper_funcs @mode_valid. + */ int (*mode_valid)(struct drm_encoder *encoder, struct drm_display_mode *mode); + /** + * @mode_set: Analogous to &drm_encoder_helper_funcs @mode_set + * callback. Wrapped by drm_i2c_encoder_mode_set(). + */ void (*mode_set)(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + /** + * @detect: Analogous to &drm_encoder_helper_funcs @detect + * callback. Wrapped by drm_i2c_encoder_detect(). + */ enum drm_connector_status (*detect)(struct drm_encoder *encoder, struct drm_connector *connector); + /** + * @get_modes: Get modes. + */ int (*get_modes)(struct drm_encoder *encoder, struct drm_connector *connector); + /** + * @create_resources: Create resources. + */ int (*create_resources)(struct drm_encoder *encoder, struct drm_connector *connector); + /** + * @set_property: Set property. + */ int (*set_property)(struct drm_encoder *encoder, struct drm_connector *connector, struct drm_property *property, uint64_t val); - }; /** * struct drm_encoder_slave - Slave encoder struct - * @base: DRM encoder object. - * @slave_funcs: Slave encoder callbacks. - * @slave_priv: Slave encoder private data. - * @bus_priv: Bus specific data. * * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the * ones in @base. The former are never actually called by the common @@ -95,10 +133,24 @@ struct drm_encoder_slave_funcs { * this. */ struct drm_encoder_slave { + /** + * @base: DRM encoder object. + */ struct drm_encoder base; + /** + * @slave_funcs: Slave encoder callbacks. + */ const struct drm_encoder_slave_funcs *slave_funcs; + + /** + * @slave_priv: Slave encoder private data. + */ void *slave_priv; + + /** + * @bus_priv: Bus specific data. + */ void *bus_priv; }; #define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base) @@ -112,16 +164,20 @@ int drm_i2c_encoder_init(struct drm_device *dev, /** * struct drm_i2c_encoder_driver * - * Describes a device driver for an encoder connected to the GPU - * through an I2C bus. 
In addition to the entry points in @i2c_driver - * an @encoder_init function should be provided. It will be called to - * give the driver an opportunity to allocate any per-encoder data - * structures and to initialize the @slave_funcs and (optionally) - * @slave_priv members of @encoder. + * Describes a device driver for an encoder connected to the GPU through an I2C + * bus. */ struct drm_i2c_encoder_driver { + /** + * @i2c_driver: I2C device driver description. + */ struct i2c_driver i2c_driver; + /** + * @encoder_init: Callback to allocate any per-encoder data structures + * and to initialize the @slave_funcs and (optionally) @slave_priv + * members of @encoder. + */ int (*encoder_init)(struct i2c_client *client, struct drm_device *dev, struct drm_encoder_slave *encoder); @@ -133,6 +189,7 @@ struct drm_i2c_encoder_driver { /** * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder + * @encoder: The encoder */ static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder) { diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index f13b34e0b752..428d81afe215 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -25,6 +25,7 @@ struct iosys_map; * All fields are considered private. */ struct drm_format_conv_state { + /* private: */ struct { void *mem; size_t size; diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 2ebec3984cd4..bae4865b2101 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -527,6 +527,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj); void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed); +void drm_gem_lock(struct drm_gem_object *obj); +void drm_gem_unlock(struct drm_gem_object *obj); + int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index bf0c31aa8fbe..efbc9f27312b 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -108,6 +108,9 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map); int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); +int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem); + int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) @@ -173,7 +176,7 @@ static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - return drm_gem_shmem_pin(shmem); + return drm_gem_shmem_pin_locked(shmem); } /** @@ -187,7 +190,7 @@ static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_gem_shmem_unpin(shmem); + drm_gem_shmem_unpin_locked(shmem); } /** diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 2938ba80750d..9a73f786f4ad 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -170,7 +170,6 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb( * @vram_base: Base address of the managed video memory * @vram_size: Size of the managed video memory in bytes * @bdev: The 
TTM BO device. - * @funcs: TTM BO functions * * The fields &struct drm_vram_mm.vram_base and * &struct drm_vram_mm.vrm_size are managed by VRAM MM, but are diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index 6e99627edf45..e7cc17ee4934 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -75,7 +75,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, * @_dev: The parent device object * @_type: the type of the struct which contains struct &drm_device * @_member: the name of the &drm_device within @_type. - * @_features: Mocked DRM device driver features + * @_feat: Mocked DRM device driver features * * This function creates a struct &drm_driver and will create a struct * &drm_device from @_dev and that driver. diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h index 5c9ef6a2aeae..53545b4ca9ef 100644 --- a/include/drm/drm_lease.h +++ b/include/drm/drm_lease.h @@ -6,6 +6,8 @@ #ifndef _DRM_LEASE_H_ #define _DRM_LEASE_H_ +#include <linux/types.h> + struct drm_file; struct drm_device; struct drm_master; diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 082a6e980d01..02d1cdd7f798 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -2,6 +2,7 @@ #ifndef __DRM_OF_H__ #define __DRM_OF_H__ +#include <linux/err.h> #include <linux/of_graph.h> #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) #include <drm/drm_bridge.h> diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h index c2188bb0b157..7ba72a81a808 100644 --- a/include/drm/drm_suballoc.h +++ b/include/drm/drm_suballoc.h @@ -37,7 +37,7 @@ struct drm_suballoc_manager { * @manager: The drm_suballoc_manager. * @soffset: Start offset. * @eoffset: End offset + 1 so that @eoffset - @soffset = size. - * @dma_fence: The fence protecting the allocation. + * @fence: The fence protecting the allocation. */ struct drm_suballoc { struct list_head olist; diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h index 8390b437a1f8..5305b9797f93 100644 --- a/include/drm/i2c/ch7006.h +++ b/include/drm/i2c/ch7006.h @@ -37,6 +37,7 @@ * meaning. */ struct ch7006_encoder_params { + /* private: FIXME: document the members */ enum { CH7006_FORMAT_RGB16 = 0, CH7006_FORMAT_YCrCb24m16, diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h index 205e27384c83..ddf248693c8b 100644 --- a/include/drm/i2c/sil164.h +++ b/include/drm/i2c/sil164.h @@ -36,6 +36,7 @@ * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf". */ struct sil164_encoder_params { + /* private: FIXME: document the members */ enum { SIL164_INPUT_EDGE_FALLING = 0, SIL164_INPUT_EDGE_RISING diff --git a/include/drm/i915_gsc_proxy_mei_interface.h b/include/drm/i915_gsc_proxy_mei_interface.h index 9462341d3ae1..850dfbf40607 100644 --- a/include/drm/i915_gsc_proxy_mei_interface.h +++ b/include/drm/i915_gsc_proxy_mei_interface.h @@ -21,7 +21,7 @@ struct i915_gsc_proxy_component_ops { struct module *owner; /** - * send - Sends a proxy message to ME FW. + * @send: Sends a proxy message to ME FW. * @dev: device struct corresponding to the mei device * @buf: message buffer to send * @size: size of the message @@ -30,7 +30,7 @@ struct i915_gsc_proxy_component_ops { int (*send)(struct device *dev, const void *buf, size_t size); /** - * recv - Receives a proxy message from ME FW. + * @recv: Receives a proxy message from ME FW. 
* @dev: device struct corresponding to the mei device * @buf: message buffer to contain the received message * @size: size of the buffer diff --git a/include/drm/i915_hdcp_interface.h b/include/drm/i915_hdcp_interface.h index 4c9c8167c2d5..d776ed7dcd00 100644 --- a/include/drm/i915_hdcp_interface.h +++ b/include/drm/i915_hdcp_interface.h @@ -54,7 +54,7 @@ enum hdcp_ddi { }; /** - * enum hdcp_tc - ME/GSC Firmware defined index for transcoders + * enum hdcp_transcoder - ME/GSC Firmware defined index for transcoders * @HDCP_INVALID_TRANSCODER: Index for Invalid transcoder * @HDCP_TRANSCODER_EDP: Index for EDP Transcoder * @HDCP_TRANSCODER_DSI0: Index for DSI0 Transcoder @@ -106,7 +106,7 @@ struct hdcp_port_data { * And Prepare AKE_Init. * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate * AKE_Send_Cert and prepare - AKE_Stored_Km/AKE_No_Stored_Km + * AKE_Stored_Km/AKE_No_Stored_Km * @verify_hprime: Verify AKE_Send_H_prime * @store_pairing_info: Store pairing info received * @initiate_locality_check: Prepare LC_Init @@ -170,14 +170,22 @@ struct i915_hdcp_ops { /** * struct i915_hdcp_arbiter - Used for communication between i915 * and hdcp drivers for the HDCP2.2 services - * @hdcp_dev: device that provide the HDCP2.2 service from MEI Bus. - * @hdcp_ops: Ops implemented by hdcp driver or intel_hdcp_gsc , used by i915 driver. */ struct i915_hdcp_arbiter { + /** + * @hdcp_dev: device that provides the HDCP2.2 service from MEI Bus. + */ struct device *hdcp_dev; + + /** + * @ops: Ops implemented by hdcp driver or intel_hdcp_gsc, used by i915 + * driver. + */ const struct i915_hdcp_ops *ops; - /* To protect the above members. */ + /** + * @mutex: To protect the above members. + */ struct mutex mutex; }; diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h index 7d96985f2d05..a532d32f58f3 100644 --- a/include/drm/i915_pxp_tee_interface.h +++ b/include/drm/i915_pxp_tee_interface.h @@ -12,20 +12,26 @@ struct scatterlist; /** * struct i915_pxp_component_ops - ops for PXP services. - * @owner: Module providing the ops - * @send: sends data to PXP - * @receive: receives data from PXP */ struct i915_pxp_component_ops { /** - * @owner: owner of the module provding the ops + * @owner: Module providing the ops. */ struct module *owner; + /** + * @send: Send a PXP message. + */ int (*send)(struct device *dev, const void *message, size_t size, unsigned long timeout_ms); + /** + * @recv: Receive a PXP message. + */ int (*recv)(struct device *dev, void *buffer, size_t size, unsigned long timeout_ms); + /** + * @gsc_command: Send a GSC command. + */ ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id, struct scatterlist *sg_in, size_t total_in_len, struct scatterlist *sg_out); @@ -35,14 +41,21 @@ struct i915_pxp_component_ops { /** * struct i915_pxp_component - Used for communication between i915 and TEE * drivers for the PXP services - * @tee_dev: device that provide the PXP service from TEE Bus. - * @pxp_ops: Ops implemented by TEE driver, used by i915 driver. */ struct i915_pxp_component { + /** + * @tee_dev: device that provides the PXP service from TEE Bus. + */ struct device *tee_dev; + + /** + * @ops: Ops implemented by TEE driver, used by i915 driver. + */ const struct i915_pxp_component_ops *ops; - /* To protect the above members. */ + /** + * @mutex: To protect the above members.
+ */ struct mutex mutex; }; diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index 0223a41a64b2..6ccf96c91f3a 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -83,6 +83,9 @@ enum ttm_bo_type { * @resource: structure describing current placement. * @ttm: TTM structure holding system pages. * @deleted: True if the object is only a zombie and already deleted. + * @bulk_move: The bulk move object. + * @priority: Priority for LRU, BOs with lower priority are evicted first. + * @pin_count: Pin count. * * Base class for TTM buffer object, that deals with data placement and CPU * mappings. GPU mappings are really up to the driver, but for simpler GPUs @@ -128,26 +131,27 @@ struct ttm_buffer_object { struct work_struct delayed_delete; /** - * Special members that are protected by the reserve lock - * and the bo::lock when written to. Can be read with - * either of these locks held. + * @sg: external source of pages and DMA addresses, protected by the + * reservation lock. */ struct sg_table *sg; }; +#define TTM_BO_MAP_IOMEM_MASK 0x80 + /** * struct ttm_bo_kmap_obj * * @virtual: The current kernel virtual address. * @page: The page when kmap'ing a single page. * @bo_kmap_type: Type of bo_kmap. + * @bo: The TTM BO. * * Object describing a kernel mapping. Since a TTM bo may be located * in various memory types with various caching policies, the * mapping can either be an ioremap, a vmap, a kmap or part of a * premapped region. */ -#define TTM_BO_MAP_IOMEM_MASK 0x80 struct ttm_bo_kmap_obj { void *virtual; struct page *page; @@ -171,6 +175,7 @@ struct ttm_bo_kmap_obj { * @force_alloc: Don't check the memory account during suspend or CPU page * faults. Should only be used by TTM internally. * @resv: Reservation object to allow reserved evictions with. + * @bytes_moved: Statistics on how many bytes have been moved. * * Context for TTM operations like changing buffer placement or general memory * allocation. @@ -264,7 +269,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, * ttm_bo_reserve_slowpath: * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. - * @sequence: Set (@bo)->sequence to this value after lock + * @ticket: Ticket used to acquire the ww_mutex. * * This is called after ttm_bo_reserve returns -EAGAIN and we backed off * from all our other reservations. Because there are no other reservations @@ -303,7 +308,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, } /** - * ttm_bo_move_null = assign memory for a buffer object. + * ttm_bo_move_null - assign memory for a buffer object. * @bo: The bo to assign the memory to * @new_mem: The memory to be assigned. * diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h index 235a743d90e1..a18f43e93aba 100644 --- a/include/drm/ttm/ttm_caching.h +++ b/include/drm/ttm/ttm_caching.h @@ -25,6 +25,8 @@ #ifndef _TTM_CACHING_H_ #define _TTM_CACHING_H_ +#include <linux/pgtable.h> + #define TTM_NUM_CACHING_TYPES 3 /** diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 03aca29d3ce4..fac1e3e57ebd 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -52,7 +52,7 @@ struct ttm_validate_buffer { }; /** - * function ttm_eu_backoff_reservation + * ttm_eu_backoff_reservation * * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. 
@@ -64,14 +64,13 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, struct list_head *list); /** - * function ttm_eu_reserve_buffers + * ttm_eu_reserve_buffers * * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only * non-blocking reserves should be tried. * @list: thread private list of ttm_validate_buffer structs. * @intr: should the wait be interruptible * @dups: [out] optional list of duplicates. - * @del_lru: true if BOs should be removed from the LRU. * * Tries to reserve bos pointed to by the list entries for validation. * If the function returns 0, all buffers are marked as "unfenced", @@ -102,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, struct list_head *dups); /** - * function ttm_eu_fence_buffer_objects. + * ttm_eu_fence_buffer_objects * * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h index cc5c09a211b4..fe72631a6e93 100644 --- a/include/drm/ttm/ttm_kmap_iter.h +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -20,7 +20,7 @@ struct iosys_map; */ struct ttm_kmap_iter_ops { /** - * kmap_local() - Map a PAGE_SIZE part of the resource using + * @map_local: Map a PAGE_SIZE part of the resource using * kmap_local semantics. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. @@ -31,7 +31,7 @@ struct ttm_kmap_iter_ops { void (*map_local)(struct ttm_kmap_iter *res_iter, struct iosys_map *dmap, pgoff_t i); /** - * unmap_local() - Unmap a PAGE_SIZE part of the resource previously + * @unmap_local: Unmap a PAGE_SIZE part of the resource previously * mapped using kmap_local. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h index 4490d43c63e3..160d954a261e 100644 --- a/include/drm/ttm/ttm_pool.h +++ b/include/drm/ttm/ttm_pool.h @@ -32,9 +32,10 @@ #include <drm/ttm/ttm_caching.h> struct device; -struct ttm_tt; -struct ttm_pool; +struct seq_file; struct ttm_operation_ctx; +struct ttm_pool; +struct ttm_tt; /** * struct ttm_pool_type - Pool for a certain memory type diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 1afa13f0c22b..69769355139f 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -251,6 +251,9 @@ struct ttm_lru_bulk_move_pos { * * Container for the current bulk move state. Should be used with * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move(). + * All BOs in a bulk_move structure need to share the same reservation object to + * ensure that the bulk as a whole is locked for eviction even if only one BO of + * the bulk is evicted. 
*/ struct ttm_lru_bulk_move { struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY]; @@ -366,7 +369,8 @@ bool ttm_resource_intersects(struct ttm_device *bdev, const struct ttm_place *place, size_t size); bool ttm_resource_compatible(struct ttm_resource *res, - struct ttm_placement *placement); + struct ttm_placement *placement, + bool evicting); void ttm_resource_set_bo(struct ttm_resource *res, struct ttm_buffer_object *bo); diff --git a/include/linux/fb.h b/include/linux/fb.h index 0dd27364d56f..811e47f9d1c3 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -694,6 +694,10 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start, __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys) +#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \ + __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \ + __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys) + /* * Initializes struct fb_ops for deferred I/O. */ diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h index cd84227f1b42..8ad8d1cd1566 100644 --- a/include/uapi/drm/nouveau_drm.h +++ b/include/uapi/drm/nouveau_drm.h @@ -73,6 +73,16 @@ struct drm_nouveau_getparam { __u64 value; }; +/* + * Those are used to support selecting the main engine used on Kepler. + * This goes into drm_nouveau_channel_alloc::tt_ctxdma_handle + */ +#define NOUVEAU_FIFO_ENGINE_GR 0x01 +#define NOUVEAU_FIFO_ENGINE_VP 0x02 +#define NOUVEAU_FIFO_ENGINE_PPP 0x04 +#define NOUVEAU_FIFO_ENGINE_BSP 0x08 +#define NOUVEAU_FIFO_ENGINE_CE 0x30 + struct drm_nouveau_channel_alloc { __u32 fb_ctxdma_handle; __u32 tt_ctxdma_handle; @@ -95,6 +105,18 @@ struct drm_nouveau_channel_free { __s32 channel; }; +struct drm_nouveau_notifierobj_alloc { + __u32 channel; + __u32 handle; + __u32 size; + __u32 offset; +}; + +struct drm_nouveau_gpuobj_free { + __s32 channel; + __u32 handle; +}; + #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h new file mode 100644 index 000000000000..dadb05ab1235 --- /dev/null +++ b/include/uapi/drm/panthor_drm.h @@ -0,0 +1,945 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2023 Collabora ltd. */ +#ifndef _PANTHOR_DRM_H_ +#define _PANTHOR_DRM_H_ + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * DOC: Introduction + * + * This documentation describes the Panthor IOCTLs. + * + * Just a few generic rules about the data passed to the Panthor IOCTLs: + * + * - Structures must be aligned on 64-bit/8-byte. If the object is not + * naturally aligned, a padding field must be added. + * - Fields must be explicitly aligned to their natural type alignment with + * pad[0..N] fields. + * - All padding fields will be checked by the driver to make sure they are + * zeroed. + * - Flags can be added, but not removed/replaced. + * - New fields can be added to the main structures (the structures + * directly passed to the ioctl). Those fields can be added at the end of + * the structure, or replace existing padding fields. Any new field being + * added must preserve the behavior that existed before those fields were + * added when a value of zero is passed. 
+ * - New fields can be added to indirect objects (objects pointed to by the + * main structure), iff those objects are passed a size to reflect the + * size known by the userspace driver (see drm_panthor_obj_array::stride + * or drm_panthor_dev_query::size). + * - If the kernel driver is too old to know some fields, those will be + * ignored if zero, and otherwise rejected (and so will be zero on output). + * - If userspace is too old to know some fields, those will be zeroed + * (input) before the structure is parsed by the kernel driver. + * - Each new flag/field addition must come with a driver version update so + * the userspace driver doesn't have to resort to trial and error to know + * which flags are supported. + * - Structures should not contain unions, as this would defeat the + * extensibility of such structures. + * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed + * at the end of the drm_panthor_ioctl_id enum. + */ + +/** + * DOC: MMIO regions exposed to userspace. + * + * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET + * + * File offset for all MMIO regions being exposed to userspace. Don't use + * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead. + * pgoffset passed to mmap2() is an unsigned long, which forces us to use a + * different offset on 32-bit and 64-bit systems. + * + * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET + * + * File offset for the LATEST_FLUSH_ID register. The userspace driver controls + * GPU cache flushing through CS instructions, but the flush reduction + * mechanism requires a flush_id. This flush_id could be queried with an + * ioctl, but Arm provides a well-isolated register page containing only this + * read-only register, so let's expose this page through a static mmap offset + * and allow direct mapping of this MMIO region so we can avoid the + * user <-> kernel round-trip. + */ +#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT (1ull << 43) +#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT (1ull << 56) +#define DRM_PANTHOR_USER_MMIO_OFFSET (sizeof(unsigned long) < 8 ? \ + DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \ + DRM_PANTHOR_USER_MMIO_OFFSET_64BIT) +#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET (DRM_PANTHOR_USER_MMIO_OFFSET | 0) + +/** + * DOC: IOCTL IDs + * + * enum drm_panthor_ioctl_id - IOCTL IDs + * + * Place new ioctls at the end, don't re-order, don't replace or remove entries. + * + * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx + * definitions instead. + */ +enum drm_panthor_ioctl_id { + /** @DRM_PANTHOR_DEV_QUERY: Query device information. */ + DRM_PANTHOR_DEV_QUERY = 0, + + /** @DRM_PANTHOR_VM_CREATE: Create a VM. */ + DRM_PANTHOR_VM_CREATE, + + /** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */ + DRM_PANTHOR_VM_DESTROY, + + /** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */ + DRM_PANTHOR_VM_BIND, + + /** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */ + DRM_PANTHOR_VM_GET_STATE, + + /** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */ + DRM_PANTHOR_BO_CREATE, + + /** + * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to + * mmap to map a GEM object. + */ + DRM_PANTHOR_BO_MMAP_OFFSET, + + /** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */ + DRM_PANTHOR_GROUP_CREATE, + + /** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */ + DRM_PANTHOR_GROUP_DESTROY, + + /** + * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging + * to a specific scheduling group.
+ */ + DRM_PANTHOR_GROUP_SUBMIT, + + /** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */ + DRM_PANTHOR_GROUP_GET_STATE, + + /** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */ + DRM_PANTHOR_TILER_HEAP_CREATE, + + /** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */ + DRM_PANTHOR_TILER_HEAP_DESTROY, +}; + +/** + * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number + * @__access: Access type. Must be R, W or RW. + * @__id: One of the DRM_PANTHOR_xxx ids. + * @__type: Suffix of the type being passed to the IOCTL. + * + * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx + * values instead. + * + * Return: An IOCTL number to be passed to ioctl() from userspace. + */ +#define DRM_IOCTL_PANTHOR(__access, __id, __type) \ + DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \ + struct drm_panthor_ ## __type) + +#define DRM_IOCTL_PANTHOR_DEV_QUERY \ + DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query) +#define DRM_IOCTL_PANTHOR_VM_CREATE \ + DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create) +#define DRM_IOCTL_PANTHOR_VM_DESTROY \ + DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy) +#define DRM_IOCTL_PANTHOR_VM_BIND \ + DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind) +#define DRM_IOCTL_PANTHOR_VM_GET_STATE \ + DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state) +#define DRM_IOCTL_PANTHOR_BO_CREATE \ + DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create) +#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET \ + DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset) +#define DRM_IOCTL_PANTHOR_GROUP_CREATE \ + DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create) +#define DRM_IOCTL_PANTHOR_GROUP_DESTROY \ + DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy) +#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT \ + DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit) +#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE \ + DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state) +#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE \ + DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create) +#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY \ + DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy) + +/** + * DOC: IOCTL arguments + */ + +/** + * struct drm_panthor_obj_array - Object array. + * + * This object is used to pass an array of objects whose size is subject to change in + * future versions of the driver. In order to support this mutability, we pass a stride + * describing the size of the object as known by userspace. + * + * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use + * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to + * the object size. + */ +struct drm_panthor_obj_array { + /** @stride: Stride of object struct. Used for versioning. */ + __u32 stride; + + /** @count: Number of objects in the array. */ + __u32 count; + + /** @array: User pointer to an array of objects. */ + __u64 array; +}; + +/** + * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field. + * @cnt: Number of elements in the array. + * @ptr: Pointer to the array to pass to the kernel. + * + * Macro initializing a drm_panthor_obj_array based on the object size as known + * by userspace. + */ +#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \ + { .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) } + +/** + * enum drm_panthor_sync_op_flags - Synchronization operation flags. + */ +enum drm_panthor_sync_op_flags { + /** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask.
*/ + DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff, + + /** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */ + DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0, + + /** + * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization + * object type. + */ + DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1, + + /** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */ + DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31, + + /** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */ + DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31), +}; + +/** + * struct drm_panthor_sync_op - Synchronization operation. + */ +struct drm_panthor_sync_op { + /** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */ + __u32 flags; + + /** @handle: Sync handle. */ + __u32 handle; + + /** + * @timeline_value: MBZ if + * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) != + * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ. + */ + __u64 timeline_value; +}; + +/** + * enum drm_panthor_dev_query_type - Query type + * + * Place new types at the end, don't re-order, don't remove or replace. + */ +enum drm_panthor_dev_query_type { + /** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */ + DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0, + + /** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */ + DRM_PANTHOR_DEV_QUERY_CSIF_INFO, +}; + +/** + * struct drm_panthor_gpu_info - GPU information + * + * Structure grouping all queryable information relating to the GPU. + */ +struct drm_panthor_gpu_info { + /** @gpu_id: GPU ID. */ + __u32 gpu_id; +#define DRM_PANTHOR_ARCH_MAJOR(x) ((x) >> 28) +#define DRM_PANTHOR_ARCH_MINOR(x) (((x) >> 24) & 0xf) +#define DRM_PANTHOR_ARCH_REV(x) (((x) >> 20) & 0xf) +#define DRM_PANTHOR_PRODUCT_MAJOR(x) (((x) >> 16) & 0xf) +#define DRM_PANTHOR_VERSION_MAJOR(x) (((x) >> 12) & 0xf) +#define DRM_PANTHOR_VERSION_MINOR(x) (((x) >> 4) & 0xff) +#define DRM_PANTHOR_VERSION_STATUS(x) ((x) & 0xf) + + /** @gpu_rev: GPU revision. */ + __u32 gpu_rev; + + /** @csf_id: Command stream frontend ID. */ + __u32 csf_id; +#define DRM_PANTHOR_CSHW_MAJOR(x) (((x) >> 26) & 0x3f) +#define DRM_PANTHOR_CSHW_MINOR(x) (((x) >> 20) & 0x3f) +#define DRM_PANTHOR_CSHW_REV(x) (((x) >> 16) & 0xf) +#define DRM_PANTHOR_MCU_MAJOR(x) (((x) >> 10) & 0x3f) +#define DRM_PANTHOR_MCU_MINOR(x) (((x) >> 4) & 0x3f) +#define DRM_PANTHOR_MCU_REV(x) ((x) & 0xf) + + /** @l2_features: L2-cache features. */ + __u32 l2_features; + + /** @tiler_features: Tiler features. */ + __u32 tiler_features; + + /** @mem_features: Memory features. */ + __u32 mem_features; + + /** @mmu_features: MMU features. */ + __u32 mmu_features; +#define DRM_PANTHOR_MMU_VA_BITS(x) ((x) & 0xff) + + /** @thread_features: Thread features. */ + __u32 thread_features; + + /** @max_threads: Maximum number of threads. */ + __u32 max_threads; + + /** @thread_max_workgroup_size: Maximum workgroup size. */ + __u32 thread_max_workgroup_size; + + /** + * @thread_max_barrier_size: Maximum number of threads that can wait + * simultaneously on a barrier. + */ + __u32 thread_max_barrier_size; + + /** @coherency_features: Coherency features. */ + __u32 coherency_features; + + /** @texture_features: Texture features. */ + __u32 texture_features[4]; + + /** @as_present: Bitmask encoding the number of address spaces exposed by the MMU. */ + __u32 as_present; + + /** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */ + __u64 shader_present; + + /** @l2_present: Bitmask encoding the L2 caches exposed by the GPU.
*/ + __u64 l2_present; + + /** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */ + __u64 tiler_present; + + /** @core_features: Used to discriminate core variants when they exist. */ + __u32 core_features; + + /** @pad: MBZ. */ + __u32 pad; +}; + +/** + * struct drm_panthor_csif_info - Command stream interface information + * + * Structure grouping all queryable information relating to the command stream interface. + */ +struct drm_panthor_csif_info { + /** @csg_slot_count: Number of command stream group slots exposed by the firmware. */ + __u32 csg_slot_count; + + /** @cs_slot_count: Number of command stream slots per group. */ + __u32 cs_slot_count; + + /** @cs_reg_count: Number of command stream registers. */ + __u32 cs_reg_count; + + /** @scoreboard_slot_count: Number of scoreboard slots. */ + __u32 scoreboard_slot_count; + + /** + * @unpreserved_cs_reg_count: Number of command stream registers reserved by + * the kernel driver to call a userspace command stream. + * + * All registers can be used by a userspace command stream, but the + * [cs_slot_count - unpreserved_cs_reg_count .. cs_slot_count] registers are + * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called. + */ + __u32 unpreserved_cs_reg_count; + + /** + * @pad: Padding field, set to zero. + */ + __u32 pad; +}; + +/** + * struct drm_panthor_dev_query - Arguments passed to DRM_IOCTL_PANTHOR_DEV_QUERY + */ +struct drm_panthor_dev_query { + /** @type: the query type (see drm_panthor_dev_query_type). */ + __u32 type; + + /** + * @size: size of the type being queried. + * + * If pointer is NULL, size is updated by the driver to provide the + * output structure size. If pointer is not NULL, the driver will + * only copy min(size, actual_structure_size) bytes to the pointer, + * and update the size accordingly. This allows us to extend query + * types without breaking userspace. + */ + __u32 size; + + /** + * @pointer: user pointer to a query type struct. + * + * Pointer can be NULL, in which case nothing is copied, but the + * actual structure size is returned. If not NULL, it must point to + * a location that's large enough to hold size bytes. + */ + __u64 pointer; +}; + +/** + * struct drm_panthor_vm_create - Arguments passed to DRM_IOCTL_PANTHOR_VM_CREATE + */ +struct drm_panthor_vm_create { + /** @flags: VM flags, MBZ. */ + __u32 flags; + + /** @id: Returned VM ID. */ + __u32 id; + + /** + * @user_va_range: Size of the VA space reserved for user objects. + * + * The kernel will pick the remaining space to map kernel-only objects to the + * VM (heap chunks, heap context, ring buffers, kernel synchronization objects, + * ...). If the space left for kernel objects is too small, kernel object + * allocation will fail further down the road. One can use + * drm_panthor_gpu_info::mmu_features to extract the total virtual address + * range, and choose a user_va_range that leaves some space for the kernel. + * + * If user_va_range is zero, the kernel will pick a sensible value based on + * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user + * split should leave enough VA space for userspace processes to support SVM, + * while still allowing the kernel to map some amount of kernel objects in + * the kernel VA range). The value chosen by the driver will be returned in + * @user_va_range. + * + * User VA space always starts at 0x0, kernel VA space is always placed after + * the user VA range.
+ */ + __u64 user_va_range; +}; + +/** + * struct drm_panthor_vm_destroy - Arguments passed to DRM_IOCTL_PANTHOR_VM_DESTROY + */ +struct drm_panthor_vm_destroy { + /** @id: ID of the VM to destroy. */ + __u32 id; + + /** @pad: MBZ. */ + __u32 pad; +}; + +/** + * enum drm_panthor_vm_bind_op_flags - VM bind operation flags + */ +enum drm_panthor_vm_bind_op_flags { + /** + * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only. + * + * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP. + */ + DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0, + + /** + * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable. + * + * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP. + */ + DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1, + + /** + * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached. + * + * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP. + */ + DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2, + + /** + * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation. + */ + DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28), + + /** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */ + DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28, + + /** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */ + DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28, + + /** + * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation. + * + * Just serves as a synchronization point on a VM queue. + * + * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags, + * and drm_panthor_vm_bind_op::syncs contains at least one element. + */ + DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28, +}; + +/** + * struct drm_panthor_vm_bind_op - VM bind operation + */ +struct drm_panthor_vm_bind_op { + /** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */ + __u32 flags; + + /** + * @bo_handle: Handle of the buffer object to map. + * MBZ for unmap or sync-only operations. + */ + __u32 bo_handle; + + /** + * @bo_offset: Buffer object offset. + * MBZ for unmap or sync-only operations. + */ + __u64 bo_offset; + + /** + * @va: Virtual address to map/unmap. + * MBZ for sync-only operations. + */ + __u64 va; + + /** + * @size: Size to map/unmap. + * MBZ for sync-only operations. + */ + __u64 size; + + /** + * @syncs: Array of struct drm_panthor_sync_op synchronization + * operations. + * + * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on + * the drm_panthor_vm_bind object containing this VM bind operation. + * + * This array shall not be empty for sync-only operations. + */ + struct drm_panthor_obj_array syncs; +}; + +/** + * enum drm_panthor_vm_bind_flags - VM bind flags + */ +enum drm_panthor_vm_bind_flags { + /** + * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM + * queue instead of being executed synchronously. + */ + DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0, +}; + +/** + * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND + */ +struct drm_panthor_vm_bind { + /** @vm_id: VM targeted by the bind request. */ + __u32 vm_id; + + /** @flags: Combination of drm_panthor_vm_bind_flags flags. */ + __u32 flags; + + /** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */ + struct drm_panthor_obj_array ops; +}; + +/** + * enum drm_panthor_vm_state - VM states. + */ +enum drm_panthor_vm_state { + /** + * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable. + * + * New VM operations will be accepted on this VM. + */ + DRM_PANTHOR_VM_STATE_USABLE, + + /** + * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
+ * + * Something put the VM in an unusable state (like an asynchronous + * VM_BIND request failing for any reason). + * + * Once the VM is in this state, all new MAP operations will be + * rejected, and any GPU job targeting this VM will fail. + * UNMAP operations are still accepted. + * + * The only way to recover from an unusable VM is to create a new + * VM, and destroy the old one. + */ + DRM_PANTHOR_VM_STATE_UNUSABLE, +}; + +/** + * struct drm_panthor_vm_get_state - Get VM state. + */ +struct drm_panthor_vm_get_state { + /** @vm_id: VM targeted by the get_state request. */ + __u32 vm_id; + + /** + * @state: state returned by the driver. + * + * Must be one of the enum drm_panthor_vm_state values. + */ + __u32 state; +}; + +/** + * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time. + */ +enum drm_panthor_bo_flags { + /** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */ + DRM_PANTHOR_BO_NO_MMAP = (1 << 0), +}; + +/** + * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE. + */ +struct drm_panthor_bo_create { + /** + * @size: Requested size for the object. + * + * The (page-aligned) allocated size for the object will be returned. + */ + __u64 size; + + /** + * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags. + */ + __u32 flags; + + /** + * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to. + * + * If not zero, the field must refer to a valid VM ID, and implies that: + * - the buffer object will only ever be bound to that VM + * - it cannot be exported as a PRIME fd + */ + __u32 exclusive_vm_id; + + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero. + */ + __u32 handle; + + /** @pad: MBZ. */ + __u32 pad; +}; + +/** + * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET. + */ +struct drm_panthor_bo_mmap_offset { + /** @handle: Handle of the object we want an mmap offset for. */ + __u32 handle; + + /** @pad: MBZ. */ + __u32 pad; + + /** @offset: The fake offset to use for subsequent mmap calls. */ + __u64 offset; +}; + +/** + * struct drm_panthor_queue_create - Queue creation arguments. + */ +struct drm_panthor_queue_create { + /** + * @priority: Defines the priority of queues inside a group. Goes from 0 to 15, + * 15 being the highest priority. + */ + __u8 priority; + + /** @pad: Padding fields, MBZ. */ + __u8 pad[3]; + + /** @ringbuf_size: Size of the ring buffer to allocate to this queue. */ + __u32 ringbuf_size; +}; + +/** + * enum drm_panthor_group_priority - Scheduling group priority + */ +enum drm_panthor_group_priority { + /** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */ + PANTHOR_GROUP_PRIORITY_LOW = 0, + + /** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */ + PANTHOR_GROUP_PRIORITY_MEDIUM, + + /** @PANTHOR_GROUP_PRIORITY_HIGH: High priority group. */ + PANTHOR_GROUP_PRIORITY_HIGH, +}; + +/** + * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE + */ +struct drm_panthor_group_create { + /** @queues: Array of drm_panthor_queue_create elements. */ + struct drm_panthor_obj_array queues; + + /** + * @max_compute_cores: Maximum number of cores that can be used by compute + * jobs across CS queues bound to this group. + * + * Must be less than or equal to the number of bits set in @compute_core_mask.
+ */ + __u8 max_compute_cores; + + /** + * @max_fragment_cores: Maximum number of cores that can be used by fragment + * jobs across CS queues bound to this group. + * + * Must be less than or equal to the number of bits set in @fragment_core_mask. + */ + __u8 max_fragment_cores; + + /** + * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs + * across CS queues bound to this group. + * + * Must be less than or equal to the number of bits set in @tiler_core_mask. + */ + __u8 max_tiler_cores; + + /** @priority: Group priority (see enum drm_panthor_group_priority). */ + __u8 priority; + + /** @pad: Padding field, MBZ. */ + __u32 pad; + + /** + * @compute_core_mask: Mask encoding cores that can be used for compute jobs. + * + * This field must have at least @max_compute_cores bits set. + * + * The bits set here should also be set in drm_panthor_gpu_info::shader_present. + */ + __u64 compute_core_mask; + + /** + * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs. + * + * This field must have at least @max_fragment_cores bits set. + * + * The bits set here should also be set in drm_panthor_gpu_info::shader_present. + */ + __u64 fragment_core_mask; + + /** + * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs. + * + * This field must have at least @max_tiler_cores bits set. + * + * The bits set here should also be set in drm_panthor_gpu_info::tiler_present. + */ + __u64 tiler_core_mask; + + /** + * @vm_id: VM ID to bind this group to. + * + * All submission to queues bound to this group will use this VM. + */ + __u32 vm_id; + + /** + * @group_handle: Returned group handle. Passed back when submitting jobs or + * destroying a group. + */ + __u32 group_handle; +}; + +/** + * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY + */ +struct drm_panthor_group_destroy { + /** @group_handle: Group to destroy. */ + __u32 group_handle; + + /** @pad: Padding field, MBZ. */ + __u32 pad; +}; + +/** + * struct drm_panthor_queue_submit - Job submission arguments. + * + * This describes the userspace command stream to call from the kernel + * command stream ring-buffer. Queue submission is always part of a group + * submission, taking one or more jobs to submit to the underlying queues. + */ +struct drm_panthor_queue_submit { + /** @queue_index: Index of the queue inside a group. */ + __u32 queue_index; + + /** + * @stream_size: Size of the command stream to execute. + * + * Must be 64-bit/8-byte aligned (the size of a CS instruction). + * + * Can be zero if stream_addr is zero too. + */ + __u32 stream_size; + + /** + * @stream_addr: GPU address of the command stream to execute. + * + * Must be 64-byte aligned. + * + * Can be zero if stream_size is zero too. + */ + __u64 stream_addr; + + /** + * @latest_flush: FLUSH_ID read at the time the stream was built. + * + * This allows cache flush elimination for the automatic + * flush+invalidate(all) done at submission time, which is needed to + * ensure the GPU doesn't get garbage when reading the indirect command + * stream buffers. If you want the cache flush to happen + * unconditionally, pass a zero here. + */ + __u32 latest_flush; + + /** @pad: MBZ. */ + __u32 pad; + + /** @syncs: Array of struct drm_panthor_sync_op sync operations.
*/ + struct drm_panthor_obj_array syncs; +}; + +/** + * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT + */ +struct drm_panthor_group_submit { + /** @group_handle: Handle of the group to queue jobs to. */ + __u32 group_handle; + + /** @pad: MBZ. */ + __u32 pad; + + /** @queue_submits: Array of drm_panthor_queue_submit objects. */ + struct drm_panthor_obj_array queue_submits; +}; + +/** + * enum drm_panthor_group_state_flags - Group state flags + */ +enum drm_panthor_group_state_flags { + /** + * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs. + * + * When a group ends up with this flag set, no jobs can be submitted to its queues. + */ + DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0, + + /** + * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults. + * + * When a group ends up with this flag set, no jobs can be submitted to its queues. + */ + DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1, +}; + +/** + * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE + * + * Used to query the state of a group and decide whether a new group should be created to + * replace it. + */ +struct drm_panthor_group_get_state { + /** @group_handle: Handle of the group to query state on. */ + __u32 group_handle; + + /** + * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the + * group state. + */ + __u32 state; + + /** @fatal_queues: Bitmask of queues that faced fatal faults. */ + __u32 fatal_queues; + + /** @pad: MBZ. */ + __u32 pad; +}; + +/** + * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE + */ +struct drm_panthor_tiler_heap_create { + /** @vm_id: VM ID the tiler heap should be mapped to. */ + __u32 vm_id; + + /** @initial_chunk_count: Initial number of chunks to allocate. */ + __u32 initial_chunk_count; + + /** @chunk_size: Chunk size. Must be a power of two, at least 256 KB. */ + __u32 chunk_size; + + /** @max_chunks: Maximum number of chunks that can be allocated. */ + __u32 max_chunks; + + /** + * @target_in_flight: Maximum number of in-flight render passes. + * + * If the heap has more than @target_in_flight tiler jobs in flight, the FW + * will wait for render passes to finish before queuing new tiler jobs. + */ + __u32 target_in_flight; + + /** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */ + __u32 handle; + + /** @tiler_heap_ctx_gpu_va: Returned heap GPU virtual address. */ + __u64 tiler_heap_ctx_gpu_va; + + /** + * @first_heap_chunk_gpu_va: First heap chunk. + * + * The tiler heap is formed of heap chunks forming a singly linked list. This + * is the first element in the list. + */ + __u64 first_heap_chunk_gpu_va; +}; + +/** + * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY + */ +struct drm_panthor_tiler_heap_destroy { + /** @handle: Handle of the tiler heap to destroy. */ + __u32 handle; + + /** @pad: Padding field, MBZ. */ + __u32 pad; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* _PANTHOR_DRM_H_ */
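The kernel-doc in panthor_drm.h above documents two uAPI conventions without a caller: the two-step size negotiation of DRM_IOCTL_PANTHOR_DEV_QUERY, and the versioned indirect-object arrays built with DRM_PANTHOR_OBJ_ARRAY(). What follows is a minimal, hypothetical userspace sketch of those conventions; it is not part of this commit. It assumes the header is installed as <drm/panthor_drm.h>, that a Panthor render node sits at /dev/dri/renderD128, and it omits all error handling; the VA range unmapped at the end is an arbitrary illustrative value.

/* Hypothetical usage sketch (not from this commit): exercises the
 * DRM_IOCTL_PANTHOR_DEV_QUERY size negotiation and the versioned-array
 * pattern documented above. Header path, device node and the unmapped
 * VA are assumptions; error handling is omitted for brevity. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed render node */

	/* Step 1: NULL pointer; the driver writes the actual structure
	 * size into q.size instead of copying anything. */
	struct drm_panthor_dev_query q = {
		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
	};
	ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);

	/* Step 2: the driver copies min(q.size, actual) bytes, so a
	 * userspace built against an older struct keeps working. */
	struct drm_panthor_gpu_info info;
	memset(&info, 0, sizeof(info));
	q.size = sizeof(info);
	q.pointer = (__u64)(uintptr_t)&info;
	ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);
	printf("arch major %u, %u VA bits\n",
	       DRM_PANTHOR_ARCH_MAJOR(info.gpu_id),
	       DRM_PANTHOR_MMU_VA_BITS(info.mmu_features));

	/* user_va_range == 0 lets the kernel pick the user/kernel VA
	 * split; the chosen value is returned in vm.user_va_range. */
	struct drm_panthor_vm_create vm = { .flags = 0 };
	ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm);

	/* DRM_PANTHOR_OBJ_ARRAY() records sizeof(op) in the stride field,
	 * which is how the kernel versions indirect object arrays. The
	 * VA range unmapped here is purely illustrative. */
	struct drm_panthor_vm_bind_op op = {
		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP,
		.va = 1ull << 20,
		.size = 1ull << 20,
	};
	struct drm_panthor_vm_bind bind = {
		.vm_id = vm.id,
		.ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
	};
	ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);
	return 0;
}

Because DRM_PANTHOR_VM_BIND_ASYNC is not set and op.syncs is left zeroed, the bind executes synchronously, which matches the rule above that the per-operation syncs array must be empty for non-async binds.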