author     Jiri Kosina <jkosina@suse.cz>   2017-05-02 11:02:41 +0200
committer  Jiri Kosina <jkosina@suse.cz>   2017-05-02 11:02:41 +0200
commit     4d6ca227c768b50b05cf183974b40abe444e9d0c (patch)
tree       bf953d8e895281053548b9967a2c4b58d641df00 /drivers/infiniband/hw/mlx5/mlx5_ib.h
parent     800f3eef8ebc1264e9c135bfa892c8ae41fa4792 (diff)
parent     af22a610bc38508d5ea760507d31be6b6983dfa8 (diff)
Merge branch 'for-4.12/asus' into for-linus
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mlx5_ib.h')
-rw-r--r--   drivers/infiniband/hw/mlx5/mlx5_ib.h   167
1 file changed, 96 insertions, 71 deletions
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6c6057eb60ea..3cd064b5f0bf 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -90,7 +90,6 @@ enum mlx5_ib_latency_class {
 	MLX5_IB_LATENCY_CLASS_LOW,
 	MLX5_IB_LATENCY_CLASS_MEDIUM,
 	MLX5_IB_LATENCY_CLASS_HIGH,
-	MLX5_IB_LATENCY_CLASS_FAST_PATH
 };
 
 enum mlx5_ib_mad_ifc_flags {
@@ -100,7 +99,7 @@ enum mlx5_ib_mad_ifc_flags {
 };
 
 enum {
-	MLX5_CROSS_CHANNEL_UUAR		= 0,
+	MLX5_CROSS_CHANNEL_BFREG	= 0,
 };
 
 enum {
@@ -120,11 +119,16 @@ struct mlx5_ib_ucontext {
 	/* protect doorbell record alloc/free
 	 */
 	struct mutex		db_page_mutex;
-	struct mlx5_uuar_info	uuari;
+	struct mlx5_bfreg_info	bfregi;
 	u8			cqe_version;
 	/* Transport Domain number */
 	u32			tdn;
 	struct list_head	vma_private_list;
+
+	unsigned long		upd_xlt_page;
+	/* protect ODP/KSM */
+	struct mutex		upd_xlt_page_mutex;
+	u64			lib_caps;
 };
 
 static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -174,13 +178,12 @@ struct mlx5_ib_flow_db {
  * enum ib_send_flags and enum ib_qp_type for low-level driver
  */
 
-#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
-#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
-#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
-
-#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
-#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
-#define MLX5_IB_SEND_UMR_UPDATE_ACCESS	IB_SEND_RESERVED_END
+#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
+#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
+#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
+#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
+#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
+#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END
 
 #define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
 /*
@@ -190,6 +193,17 @@ struct mlx5_ib_flow_db {
 #define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
 #define MLX5_IB_WR_UMR		IB_WR_RESERVED1
 
+#define MLX5_IB_UMR_OCTOWORD	       16
+#define MLX5_IB_UMR_XLT_ALIGNMENT      64
+
+#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
+#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
+#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
+#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
+#define MLX5_IB_UPD_XLT_PD	      BIT(4)
+#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
+#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
+
 /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
  *
  * These flags are intended for internal use by the mlx5_ib driver, and they
@@ -207,6 +221,10 @@ struct wr_list {
 	u16	next;
 };
 
+enum mlx5_ib_rq_flags {
+	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
+};
+
 struct mlx5_ib_wq {
 	u64		       *wrid;
 	u32		       *wr_data;
@@ -264,29 +282,6 @@ struct mlx5_ib_rwq_ind_table {
 	u32			rqtn;
 };
 
-/*
- * Connect-IB can trigger up to four concurrent pagefaults
- * per-QP.
- */
-enum mlx5_ib_pagefault_context {
-	MLX5_IB_PAGEFAULT_RESPONDER_READ,
-	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
-	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
-	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
-	MLX5_IB_PAGEFAULT_CONTEXTS
-};
-
-static inline enum mlx5_ib_pagefault_context
-	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
-{
-	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
-}
-
-struct mlx5_ib_pfault {
-	struct work_struct	work;
-	struct mlx5_pagefault	mpfault;
-};
-
 struct mlx5_ib_ubuffer {
 	struct ib_umem	       *umem;
 	int			buf_size;
@@ -318,6 +313,7 @@ struct mlx5_ib_rq {
 	struct mlx5_db		*doorbell;
 	u32			tirn;
 	u8			state;
+	u32			flags;
 };
 
 struct mlx5_ib_sq {
@@ -334,6 +330,12 @@ struct mlx5_ib_raw_packet_qp {
 	struct mlx5_ib_rq rq;
 };
 
+struct mlx5_bf {
+	int			buf_size;
+	unsigned long		offset;
+	struct mlx5_sq_bfreg   *bfreg;
+};
+
 struct mlx5_ib_qp {
 	struct ib_qp		ibqp;
 	union {
@@ -359,33 +361,19 @@ struct mlx5_ib_qp {
 	int			wq_sig;
 	int			scat_cqe;
 	int			max_inline_data;
-	struct mlx5_bf	       *bf;
+	struct mlx5_bf		bf;
 	int			has_rq;
 
 	/* only for user space QPs. For kernel
 	 * we have it from the bf object
 	 */
-	int			uuarn;
+	int			bfregn;
 
 	int			create_type;
 
 	/* Store signature errors */
 	bool			signature_en;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	/*
-	 * A flag that is true for QP's that are in a state that doesn't
-	 * allow page faults, and shouldn't schedule any more faults.
-	 */
-	int			disable_page_faults;
-	/*
-	 * The disable_page_faults_lock protects a QP's disable_page_faults
-	 * field, allowing for a thread to atomically check whether the QP
-	 * allows page faults, and if so schedule a page fault.
-	 */
-	spinlock_t		disable_page_faults_lock;
-	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
-#endif
 	struct list_head	qps_list;
 	struct list_head	cq_recv_list;
 	struct list_head	cq_send_list;
@@ -410,17 +398,16 @@ enum mlx5_ib_qp_flags {
 	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
 	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
 	MLX5_IB_QP_RSS				= 1 << 8,
+	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
 };
 
 struct mlx5_umr_wr {
 	struct ib_send_wr		wr;
-	union {
-		u64			virt_addr;
-		u64			offset;
-	} target;
+	u64				virt_addr;
+	u64				offset;
 	struct ib_pd		       *pd;
 	unsigned int			page_shift;
-	unsigned int			npages;
+	unsigned int			xlt_size;
 	u64				length;
 	int				access_flags;
 	u32				mkey;
@@ -517,6 +504,10 @@ struct mlx5_ib_mr {
 	int			live;
 	void			*descs_alloc;
 	int			access_flags; /* Needed for rereg MR */
+
+	struct mlx5_ib_mr      *parent;
+	atomic_t		num_leaf_free;
+	wait_queue_head_t       q_leaf_free;
 };
 
 struct mlx5_ib_mw {
@@ -555,6 +546,10 @@ struct mlx5_cache_ent {
 	struct dentry	       *dir;
 	char			name[4];
 	u32			order;
+	u32			xlt;
+	u32			access_mode;
+	u32			page;
+
 	u32			size;
 	u32			cur;
 	u32			miss;
@@ -569,6 +564,7 @@ struct mlx5_cache_ent {
 	struct work_struct	work;
 	struct delayed_work	dwork;
 	int			pending;
+	struct completion	compl;
 };
 
 struct mlx5_mr_cache {
@@ -599,8 +595,15 @@ struct mlx5_ib_resources {
 	struct mutex	mutex;
 };
 
+struct mlx5_ib_q_counters {
+	const char **names;
+	size_t *offsets;
+	u32 num_counters;
+	u16 set_id;
+};
+
 struct mlx5_ib_port {
-	u16 q_cnt_id;
+	struct mlx5_ib_q_counters q_cnts;
 };
 
 struct mlx5_roce {
@@ -617,7 +620,6 @@ struct mlx5_ib_dev {
 	struct ib_device		ib_dev;
 	struct mlx5_core_dev		*mdev;
 	struct mlx5_roce		roce;
-	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
 	int				num_ports;
 	/* serialize update of capability mask
 	 */
@@ -634,11 +636,13 @@ struct mlx5_ib_dev {
 	int				fill_delay;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	struct ib_odp_caps	odp_caps;
+	u64			odp_max_size;
 	/*
 	 * Sleepable RCU that prevents destruction of MRs while they are still
 	 * being used by a page fault handler.
 	 */
 	struct srcu_struct      mr_srcu;
+	u32			null_mkey;
 #endif
 	struct mlx5_ib_flow_db	flow_db;
 	/* protect resources needed as part of reset flow */
@@ -646,6 +650,8 @@ struct mlx5_ib_dev {
 	struct list_head	qp_list;
 	/* Array with num_ports elements */
 	struct mlx5_ib_port	*port;
+	struct mlx5_sq_bfreg	bfreg;
+	struct mlx5_sq_bfreg	fp_bfreg;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -787,8 +793,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 			       struct ib_udata *udata);
 int mlx5_ib_dealloc_mw(struct ib_mw *mw);
-int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
-		       int npages, int zap);
+int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+		       int page_shift, int flags);
+struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+					     int access_flags);
+void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			  u64 length, u64 virt_addr, int access_flags,
 			  struct ib_pd *pd, struct ib_udata *udata);
@@ -842,7 +851,9 @@ void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
+
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
+void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 			    struct ib_mr_status *mr_status);
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
@@ -857,33 +868,32 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-extern struct workqueue_struct *mlx5_ib_page_fault_wq;
-
 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
-void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
-			       struct mlx5_ib_pfault *pfault);
-void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
+void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
+		    struct mlx5_pagefault *pfault);
 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
 void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
-void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
 			      unsigned long end);
+void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
+			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
 	return;
 }
 
-static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)		{}
 static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
-static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)	{}
+static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)	    {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
-static inline void mlx5_ib_odp_cleanup(void)				{}
-static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
-static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)	{}
+static inline void mlx5_ib_odp_cleanup(void)				    {}
+static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
+					 size_t nentries, struct mlx5_ib_mr *mr,
+					 int flags) {}
 
 #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 
@@ -898,6 +908,8 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
 
 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
 			       int index);
+int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
+			   int index, enum ib_gid_type *gid_type);
 
 /* GSI QP helper functions */
 struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
@@ -1001,4 +1013,17 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
 
 	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
 }
+
+static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
+{
+	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+				MLX5_UARS_IN_PAGE : 1;
+}
+
+static inline int get_num_uars(struct mlx5_ib_dev *dev,
+			       struct mlx5_bfreg_info *bfregi)
+{
+	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
+}
+
 #endif /* MLX5_IB_H */
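
As a companion to the get_uars_per_sys_page()/get_num_uars() helpers added in the final hunk above, the stand-alone C sketch below mirrors their arithmetic. It is an illustration only: MLX5_CAP_GEN(), struct mlx5_ib_dev, struct mlx5_bfreg_info and MLX5_UARS_IN_PAGE are kernel-internal, so the uar_4k capability bit and the UARs-per-page count are replaced here with plain parameters and an assumed value of 16.

/*
 * User-space mock of the new UAR accounting helpers; not driver code.
 * Assumption: MOCK_MLX5_UARS_IN_PAGE stands in for the kernel macro and is
 * set to 16 (e.g. a 64KB system page carved into 4KB UARs).
 */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_MLX5_UARS_IN_PAGE 16	/* assumed value, not the kernel macro */

static int mock_get_uars_per_sys_page(bool uar_4k_cap, bool lib_support)
{
	/* 4KB UARs are packed into one system page only when both the device
	 * capability and the user-space library (lib_caps) report support. */
	return (lib_support && uar_4k_cap) ? MOCK_MLX5_UARS_IN_PAGE : 1;
}

static int mock_get_num_uars(bool uar_4k_cap, bool lib_uar_4k, int num_sys_pages)
{
	/* Total UARs = UARs per system page * number of system pages. */
	return mock_get_uars_per_sys_page(uar_4k_cap, lib_uar_4k) * num_sys_pages;
}

int main(void)
{
	printf("legacy layout, 4 pages : %d UARs\n",
	       mock_get_num_uars(false, false, 4));
	printf("4K-UAR layout, 4 pages : %d UARs\n",
	       mock_get_num_uars(true, true, 4));
	return 0;
}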
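
The reworked struct mlx5_umr_wr in the diff keeps a generic struct ib_send_wr as its first member, and the driver conventionally recovers its private request from the generic pointer via container_of() (mlx5_ib.h carries an umr_wr() helper for this, not shown in the hunks above). The sketch below demonstrates only that embedding pattern with trimmed mock types under those assumptions; it is not driver code.

/*
 * Mock of the WR embedding pattern behind struct mlx5_umr_wr. The real
 * ib_send_wr and the driver's umr_wr() helper live in the RDMA headers and
 * in mlx5_ib.h; only the pointer arithmetic here is the genuine mechanism.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mock_ib_send_wr { int opcode; };

struct mock_mlx5_umr_wr {
	struct mock_ib_send_wr wr;	/* generic WR embedded first */
	unsigned long long virt_addr;	/* fields now flat; 'target' union removed */
	unsigned long long offset;
	unsigned int xlt_size;		/* replaces the old 'npages' field */
};

int main(void)
{
	struct mock_mlx5_umr_wr umrwr = { .xlt_size = 64 };
	struct mock_ib_send_wr *wr = &umrwr.wr;	/* what the post-send path sees */

	/* Recover the driver-private request from the generic pointer. */
	struct mock_mlx5_umr_wr *back =
		container_of(wr, struct mock_mlx5_umr_wr, wr);
	printf("xlt_size = %u\n", back->xlt_size);
	return 0;
}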