| author | Francois Dugast <[email protected]> | 2023-11-14 13:34:27 +0000 |
|---|---|---|
| committer | Rodrigo Vivi <[email protected]> | 2023-12-21 11:44:33 -0500 |
| commit | d5dc73dbd148ef38dbe35f18d2908d2ff343c208 (patch) | |
| tree | c4614e7ecc3ad6cdde34f5cdf3df3fb7d6e67550 | /include/uapi |
| parent | b646ce9ce99f74d3dee8fd56303b9255d3c278ec (diff) | |
drm/xe/uapi: Add missing DRM_ prefix in uAPI constants
Most constants defined in xe_drm.h use the DRM_XE_ prefix, which helps
identify the namespace. Make this systematic and add the prefix where
it was missing.
v2:
- fix vertical alignment of define values
- remove double DRM_ in some variables (José Roberto de Souza)
v3: Rebase
Signed-off-by: Francois Dugast <[email protected]>
Reviewed-by: Matthew Brost <[email protected]>
Signed-off-by: Rodrigo Vivi <[email protected]>
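For userspace, the rename above is purely mechanical: any code that referenced the old XE_* spellings has to move to the DRM_XE_* ones. As a minimal, hypothetical C sketch (not part of this patch), the perf_event_open example from the header's PMU documentation would now use the renamed DRM_XE_PMU_RENDER_GROUP_BUSY(gt) config ID; the xe_pmu_type parameter is a placeholder that would normally be read from the PMU's sysfs "type" file, and the PMU macros are repeated here only so the sketch is self-contained (they match the definitions added by this patch in xe_drm.h).

    #include <linux/perf_event.h>
    #include <linux/types.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    /* Mirrors the renamed PMU config macros from <drm/xe_drm.h>. */
    #define __DRM_XE_PMU_GT_SHIFT (56)
    #define ___DRM_XE_PMU_OTHER(gt, x) \
            (((__u64)(x)) | ((__u64)(gt) << __DRM_XE_PMU_GT_SHIFT))
    #define DRM_XE_PMU_RENDER_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 0)

    /* Hypothetical helper: open the render-group busy counter for GT 0.
     * xe_pmu_type is a placeholder for the PMU type id read from sysfs. */
    static int open_render_busy_counter(int xe_pmu_type)
    {
            struct perf_event_attr attr;
            int cpu = 0;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = xe_pmu_type;
            attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
            attr.use_clockid = 1;
            attr.clockid = CLOCK_MONOTONIC;
            attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0);

            /* Returns a perf fd that can be read() for the counter value. */
            return (int)syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
    }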
Diffstat (limited to 'include/uapi')
| -rw-r--r-- | include/uapi/drm/xe_drm.h | 124 |
1 file changed, 62 insertions, 62 deletions
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index e007dbefd627..3ef49e3baaed 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -19,12 +19,12 @@ extern "C" {
 /**
  * DOC: uevent generated by xe on it's pci node.
  *
- * XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
+ * DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
  * fails. The value supplied with the event is always "NEEDS_RESET".
  * Additional information supplied is tile id and gt id of the gt unit for
  * which reset has failed.
  */
-#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
+#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
 /**
  * struct xe_user_extension - Base class for defining a chain of extensions
@@ -148,14 +148,14 @@ struct drm_xe_engine_class_instance {
  * enum drm_xe_memory_class - Supported memory classes.
  */
 enum drm_xe_memory_class {
- /** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
- XE_MEM_REGION_CLASS_SYSMEM = 0,
+ /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+ DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
 /**
- * @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+ * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
  * represents the memory that is local to the device, which we
  * call VRAM. Not valid on integrated platforms.
  */
- XE_MEM_REGION_CLASS_VRAM
+ DRM_XE_MEM_REGION_CLASS_VRAM
 };
@@ -215,7 +215,7 @@ struct drm_xe_query_mem_region {
  * always equal the @total_size, since all of it will be CPU
  * accessible.
  *
- * Note this is only tracked for XE_MEM_REGION_CLASS_VRAM
+ * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
  * regions (for other types the value here will always equal
  * zero).
  */
@@ -227,7 +227,7 @@ struct drm_xe_query_mem_region {
  * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
  * accounting. Without this the value here will always equal
  * zero. Note this is only currently tracked for
- * XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+ * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
  * here will always be zero).
  */
 __u64 cpu_visible_used;
@@ -320,12 +320,12 @@ struct drm_xe_query_config {
 /** @pad: MBZ */
 __u32 pad;
-#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
-#define XE_QUERY_CONFIG_FLAGS 1
- #define XE_QUERY_CONFIG_FLAGS_HAS_VRAM (0x1 << 0)
-#define XE_QUERY_CONFIG_MIN_ALIGNMENT 2
-#define XE_QUERY_CONFIG_VA_BITS 3
-#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
+#define DRM_XE_QUERY_CONFIG_FLAGS 1
+ #define DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM (0x1 << 0)
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
+#define DRM_XE_QUERY_CONFIG_VA_BITS 3
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
 /** @info: array of elements containing the config info */
 __u64 info[];
 };
@@ -339,8 +339,8 @@ struct drm_xe_query_config {
  * implementing graphics and/or media operations.
  */
 struct drm_xe_query_gt {
-#define XE_QUERY_GT_TYPE_MAIN 0
-#define XE_QUERY_GT_TYPE_MEDIA 1
+#define DRM_XE_QUERY_GT_TYPE_MAIN 0
+#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
 /** @type: GT type: Main or Media */
 __u16 type;
 /** @gt_id: Unique ID of this GT within the PCI Device */
@@ -400,7 +400,7 @@ struct drm_xe_query_topology_mask {
  * DSS_GEOMETRY ff ff ff ff 00 00 00 00
  * means 32 DSS are available for geometry.
  */
-#define XE_TOPO_DSS_GEOMETRY (1 << 0)
+#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
 /*
  * To query the mask of Dual Sub Slices (DSS) available for compute
  * operations. For example a query response containing the following
@@ -408,7 +408,7 @@ struct drm_xe_query_topology_mask {
  * DSS_COMPUTE ff ff ff ff 00 00 00 00
  * means 32 DSS are available for compute.
  */
-#define XE_TOPO_DSS_COMPUTE (1 << 1)
+#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
 /*
  * To query the mask of Execution Units (EU) available per Dual Sub
  * Slices (DSS). For example a query response containing the following
@@ -416,7 +416,7 @@ struct drm_xe_query_topology_mask {
  * EU_PER_DSS ff ff 00 00 00 00 00 00
  * means each DSS has 16 EU.
  */
-#define XE_TOPO_EU_PER_DSS (1 << 2)
+#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
 /** @type: type of mask */
 __u16 type;
@@ -497,8 +497,8 @@ struct drm_xe_gem_create {
  */
 __u64 size;
-#define XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
-#define XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
 /*
  * When using VRAM as a possible placement, ensure that the corresponding VRAM
  * allocation will always use the CPU accessible part of VRAM. This is important
@@ -514,7 +514,7 @@ struct drm_xe_gem_create {
  * display surfaces, therefore the kernel requires setting this flag for such
  * objects, otherwise an error is thrown on small-bar systems.
  */
-#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
 /**
  * @flags: Flags, currently a mask of memory instances of where BO can
  * be placed
@@ -581,14 +581,14 @@ struct drm_xe_ext_set_property {
 };
 struct drm_xe_vm_create {
-#define XE_VM_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_VM_EXTENSION_SET_PROPERTY 0
 /** @extensions: Pointer to the first extension struct, if any */
 __u64 extensions;
-#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0)
-#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1)
-#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2)
-#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3)
+#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0)
+#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1)
+#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2)
+#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3)
 /** @flags: Flags */
 __u32 flags;
@@ -644,29 +644,29 @@ struct drm_xe_vm_bind_op {
  */
 __u64 tile_mask;
-#define XE_VM_BIND_OP_MAP 0x0
-#define XE_VM_BIND_OP_UNMAP 0x1
-#define XE_VM_BIND_OP_MAP_USERPTR 0x2
-#define XE_VM_BIND_OP_UNMAP_ALL 0x3
-#define XE_VM_BIND_OP_PREFETCH 0x4
+#define DRM_XE_VM_BIND_OP_MAP 0x0
+#define DRM_XE_VM_BIND_OP_UNMAP 0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
 /** @op: Bind operation to perform */
 __u32 op;
-#define XE_VM_BIND_FLAG_READONLY (0x1 << 0)
-#define XE_VM_BIND_FLAG_ASYNC (0x1 << 1)
+#define DRM_XE_VM_BIND_FLAG_READONLY (0x1 << 0)
+#define DRM_XE_VM_BIND_FLAG_ASYNC (0x1 << 1)
 /*
  * Valid on a faulting VM only, do the MAP operation immediately rather
  * than deferring the MAP to the page fault handler.
  */
-#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2)
 /*
  * When the NULL flag is set, the page tables are setup with a special
  * bit which indicates writes are dropped and all reads return zero. In
- * the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP
+ * the future, the NULL flags will only be valid for DRM_XE_VM_BIND_OP_MAP
  * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
  * intended to implement VK sparse bindings.
  */
-#define XE_VM_BIND_FLAG_NULL (0x1 << 3)
+#define DRM_XE_VM_BIND_FLAG_NULL (0x1 << 3)
 /** @flags: Bind flags */
 __u32 flags;
@@ -721,19 +721,19 @@ struct drm_xe_vm_bind {
 __u64 reserved[2];
 };
-/* For use with XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
+/* For use with DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
 /* Monitor 128KB contiguous region with 4K sub-granularity */
-#define XE_ACC_GRANULARITY_128K 0
+#define DRM_XE_ACC_GRANULARITY_128K 0
 /* Monitor 2MB contiguous region with 64KB sub-granularity */
-#define XE_ACC_GRANULARITY_2M 1
+#define DRM_XE_ACC_GRANULARITY_2M 1
 /* Monitor 16MB contiguous region with 512KB sub-granularity */
-#define XE_ACC_GRANULARITY_16M 2
+#define DRM_XE_ACC_GRANULARITY_16M 2
 /* Monitor 64MB contiguous region with 2M sub-granularity */
-#define XE_ACC_GRANULARITY_64M 3
+#define DRM_XE_ACC_GRANULARITY_64M 3
 /**
  * struct drm_xe_exec_queue_set_property - exec queue set property
@@ -747,14 +747,14 @@ struct drm_xe_exec_queue_set_property {
 /** @exec_queue_id: Exec queue ID */
 __u32 exec_queue_id;
-#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
-#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
-#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
-#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
-#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
 /** @property: property to set */
 __u32 property;
@@ -766,7 +766,7 @@ struct drm_xe_exec_queue_set_property {
 };
 struct drm_xe_exec_queue_create {
-#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
 /** @extensions: Pointer to the first extension struct, if any */
 __u64 extensions;
@@ -805,7 +805,7 @@ struct drm_xe_exec_queue_get_property {
 /** @exec_queue_id: Exec queue ID */
 __u32 exec_queue_id;
-#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
 /** @property: property to get */
 __u32 property;
@@ -973,11 +973,11 @@ struct drm_xe_wait_user_fence {
 /**
  * DOC: XE PMU event config IDs
  *
- * Check 'man perf_event_open' to use the ID's XE_PMU_XXXX listed in xe_drm.h
+ * Check 'man perf_event_open' to use the ID's DRM_XE_PMU_XXXX listed in xe_drm.h
  * in 'struct perf_event_attr' as part of perf_event_open syscall to read a
  * particular event.
  *
- * For example to open the XE_PMU_RENDER_GROUP_BUSY(0):
+ * For example to open the DRM_XE_PMU_RENDER_GROUP_BUSY(0):
 *
  * .. code-block:: C
  *
@@ -991,7 +991,7 @@ struct drm_xe_wait_user_fence {
  * attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
  * attr.use_clockid = 1;
  * attr.clockid = CLOCK_MONOTONIC;
- * attr.config = XE_PMU_RENDER_GROUP_BUSY(0);
+ * attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0);
  *
  * fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
  */
@@ -999,15 +999,15 @@ struct drm_xe_wait_user_fence {
 /*
  * Top bits of every counter are GT id.
  */
-#define __XE_PMU_GT_SHIFT (56)
+#define __DRM_XE_PMU_GT_SHIFT (56)
-#define ___XE_PMU_OTHER(gt, x) \
- (((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT))
+#define ___DRM_XE_PMU_OTHER(gt, x) \
+ (((__u64)(x)) | ((__u64)(gt) << __DRM_XE_PMU_GT_SHIFT))
-#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 0)
-#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1)
-#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2)
-#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3)
+#define DRM_XE_PMU_RENDER_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 0)
+#define DRM_XE_PMU_COPY_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 1)
+#define DRM_XE_PMU_MEDIA_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 2)
+#define DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 3)
 #if defined(__cplusplus)
 }