Diffstat (limited to 'drivers/infiniband/hw/mlx5/mlx5_ib.h')
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	79
1 file changed, 40 insertions, 39 deletions
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 998b67509a53..2e2ad3918385 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -259,6 +259,12 @@ struct mlx5_ib_flow_matcher {
 	u8 match_criteria_enable;
 };
 
+struct mlx5_ib_steering_anchor {
+	struct mlx5_ib_flow_prio *ft_prio;
+	struct mlx5_ib_dev *dev;
+	atomic_t usecnt;
+};
+
 struct mlx5_ib_pp {
 	u16 index;
 	struct mlx5_core_dev *mdev;
@@ -613,6 +619,7 @@ struct mlx5_ib_mkey {
 	unsigned int ndescs;
 	struct wait_queue_head wait;
 	refcount_t usecount;
+	struct mlx5_cache_ent *cache_ent;
 };
 
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -635,20 +642,9 @@ struct mlx5_ib_mr {
 	struct ib_mr ibmr;
 	struct mlx5_ib_mkey mmkey;
 
-	/* User MR data */
-	struct mlx5_cache_ent *cache_ent;
-	/* Everything after cache_ent is zero'd when MR allocated */
 	struct ib_umem *umem;
 
 	union {
-		/* Used only while the MR is in the cache */
-		struct {
-			u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
-			struct mlx5_async_work cb_work;
-			/* Cache list element */
-			struct list_head list;
-		};
-
 		/* Used only by kernel MRs (umem == NULL) */
 		struct {
 			void *descs;
@@ -688,12 +684,6 @@ struct mlx5_ib_mr {
 	};
 };
 
-/* Zero the fields in the mr that are variant depending on usage */
-static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
-{
-	memset_after(mr, 0, cache_ent);
-}
-
 static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
 {
 	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
@@ -717,21 +707,29 @@ struct mlx5_ib_umr_context {
 	struct completion done;
 };
 
+enum {
+	MLX5_UMR_STATE_ACTIVE,
+	MLX5_UMR_STATE_RECOVER,
+	MLX5_UMR_STATE_ERR,
+};
+
 struct umr_common {
 	struct ib_pd *pd;
 	struct ib_cq *cq;
 	struct ib_qp *qp;
-	/* control access to UMR QP
+	/* Protects from UMR QP overflow
 	 */
 	struct semaphore sem;
+	/* Protects from using UMR while the UMR is not active
+	 */
+	struct mutex lock;
+	unsigned int state;
 };
 
 struct mlx5_cache_ent {
-	struct list_head head;
-	/* sync access to the cahce entry
-	 */
-	spinlock_t lock;
-
+	struct xarray mkeys;
+	unsigned long stored;
+	unsigned long reserved;
 
 	char name[4];
 	u32 order;
@@ -743,18 +741,11 @@ struct mlx5_cache_ent {
 	u8 fill_to_high_water:1;
 
 	/*
-	 * - available_mrs is the length of list head, ie the number of MRs
-	 *   available for immediate allocation.
-	 * - total_mrs is available_mrs plus all in use MRs that could be
-	 *   returned to the cache.
-	 * - limit is the low water mark for available_mrs, 2* limit is the
+	 * - limit is the low water mark for stored mkeys, 2* limit is the
 	 *   upper water mark.
-	 * - pending is the number of MRs currently being created
 	 */
-	u32 total_mrs;
-	u32 available_mrs;
+	u32 in_use;
 	u32 limit;
-	u32 pending;
 
 	/* Statistics */
 	u32 miss;
@@ -763,9 +754,19 @@ struct mlx5_cache_ent {
 	struct delayed_work dwork;
 };
 
-struct mlx5_mr_cache {
+struct mlx5r_async_create_mkey {
+	union {
+		u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+		u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+	};
+	struct mlx5_async_work cb_work;
+	struct mlx5_cache_ent *ent;
+	u32 mkey;
+};
+
+struct mlx5_mkey_cache {
 	struct workqueue_struct *wq;
-	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
+	struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
 	struct dentry *root;
 	unsigned long last_add;
 };
@@ -1064,7 +1065,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_resources devr;
 
 	atomic_t mkey_var;
-	struct mlx5_mr_cache cache;
+	struct mlx5_mkey_cache cache;
 	struct timer_list delay_timer;
 	/* Prevents soft lock on massive reg MRs */
 	struct mutex slow_path_mutex;
@@ -1309,8 +1310,8 @@ void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
 			  u64 access_flags);
 void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
 
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 				       struct mlx5_cache_ent *ent,
@@ -1338,7 +1339,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
 void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 			   struct mlx5_ib_mr *mr, int flags);
 
@@ -1357,7 +1358,7 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
 static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
 static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 					 struct mlx5_ib_mr *mr, int flags) {}
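
The new MLX5_UMR_STATE_* values and the mutex/state pair added to struct umr_common imply a gate in front of the UMR QP: posts proceed only while the state is active. Below is a minimal sketch of such a gate; the helper name mlx5r_umr_may_post() and the exact error codes are assumptions for illustration, not code from this patch.

/* Hypothetical helper (not from this patch): check umr_common state
 * under the new mutex before letting work reach the UMR QP. */
static int mlx5r_umr_may_post(struct umr_common *umrc)
{
	int err;

	mutex_lock(&umrc->lock);
	switch (umrc->state) {
	case MLX5_UMR_STATE_ACTIVE:
		err = 0;		/* QP usable, proceed to the semaphore */
		break;
	case MLX5_UMR_STATE_RECOVER:
		err = -EAGAIN;		/* QP being recreated; caller may retry */
		break;
	default:
		err = -EFAULT;		/* MLX5_UMR_STATE_ERR: recovery failed */
		break;
	}
	mutex_unlock(&umrc->lock);
	return err;
}

Note how the two locks split duties: the existing semaphore still bounds outstanding WQEs ("Protects from UMR QP overflow"), while the new mutex serializes state transitions against posters.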
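
struct mlx5_cache_ent now keeps raw mkeys in an xarray packed at indices 0..stored-1, with reserved additionally counting slots claimed for mkeys whose asynchronous creation has not completed yet. A minimal sketch of the push/pop bookkeeping under xa_lock follows; the helper names are invented here and the reservation handling is simplified relative to the driver.

#include <linux/xarray.h>

/* Hypothetical helper: append one ready mkey at the tail of the entry. */
static int sketch_push_mkey(struct mlx5_cache_ent *ent, u32 mkey)
{
	void *old;

	xa_lock_irq(&ent->mkeys);
	old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey),
			 GFP_ATOMIC);
	if (xa_is_err(old)) {
		xa_unlock_irq(&ent->mkeys);
		return xa_err(old);
	}
	ent->stored++;
	if (ent->reserved < ent->stored)	/* simplified: no separate */
		ent->reserved = ent->stored;	/* reservation step shown */
	xa_unlock_irq(&ent->mkeys);
	return 0;
}

/* Hypothetical helper: take the most recently stored mkey, if any. */
static int sketch_pop_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
{
	void *entry;

	xa_lock_irq(&ent->mkeys);
	if (!ent->stored) {
		xa_unlock_irq(&ent->mkeys);
		return -ENOENT;
	}
	ent->stored--;
	ent->reserved--;
	entry = __xa_erase(&ent->mkeys, ent->stored);
	xa_unlock_irq(&ent->mkeys);
	*mkey = (u32)xa_to_value(entry);
	return 0;
}

Compared with the removed list_head/spinlock pair, xa_lock takes over for the bespoke ent->lock, and storing bare mkey values via xa_mk_value() means whole struct mlx5_ib_mr objects no longer need to exist just to sit in the cache; that is also why the in-cache anonymous struct (out[], cb_work, list) and mlx5_clear_mr() could be deleted from mlx5_ib_mr.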
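
struct mlx5r_async_create_mkey bundles the CREATE_MKEY command input and output in one union next to the mlx5_async_work, so a single allocation carries the request until firmware completes it. A sketch of what the completion side could look like, assuming the standard mlx5_cmd_exec_cb() callback shape; the function name is hypothetical, and the real driver additionally folds variant bits into the final mkey.

/* Hypothetical completion handler: recover the request from the
 * embedded cb_work and read the new mkey index from the FW output. */
static void sketch_create_mkey_done(int status, struct mlx5_async_work *context)
{
	struct mlx5r_async_create_mkey *async_create =
		container_of(context, struct mlx5r_async_create_mkey, cb_work);

	if (!status)
		async_create->mkey = MLX5_GET(create_mkey_out,
					      async_create->out, mkey_index);
	/* on success: push the mkey into async_create->ent;
	 * on failure: release the slot counted in ent->reserved */
}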