Lines matching refs: mr (uses of the mr / struct mlx5_ib_mr identifier in drivers/infiniband/hw/mlx5/mr.c; each entry gives the source line number, the matching code, and the enclosing function)
136 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in destroy_mkey() argument
138 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
140 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in destroy_mkey()
145 struct mlx5_ib_mr *mr = in create_mkey_callback() local
147 struct mlx5_cache_ent *ent = mr->cache_ent; in create_mkey_callback()
153 kfree(mr); in create_mkey_callback()
162 mr->mmkey.type = MLX5_MKEY_MR; in create_mkey_callback()
163 mr->mmkey.key |= mlx5_idx_to_mkey( in create_mkey_callback()
164 MLX5_GET(create_mkey_out, mr->out, mkey_index)); in create_mkey_callback()
165 init_waitqueue_head(&mr->mmkey.wait); in create_mkey_callback()
170 list_add_tail(&mr->list, &ent->head); in create_mkey_callback()
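The completion callback listed above (create_mkey_callback, source line 163) builds the full 32-bit mkey by OR-ing the firmware-assigned index reported in create_mkey_out into the upper bits of a key whose low byte was already chosen by software, and destroy_mkey (line 138) strips that low byte again via mlx5_base_mkey. A minimal user-space sketch of that index/key relationship, assuming the index occupies bits 8..31 and the low byte is a software variant; the helper names and constants below are illustrative, not the kernel's:

/*
 * Simplified model of the mkey <-> index conversion used around
 * create_mkey_callback()/destroy_mkey(); illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static inline uint32_t idx_to_mkey(uint32_t mkey_idx)
{
	return mkey_idx << 8;		/* index occupies bits 8..31 */
}

static inline uint32_t base_mkey(uint32_t key)
{
	return key & 0xffffff00u;	/* strip the software-chosen low byte */
}

int main(void)
{
	uint32_t variant = 0x5a;	/* low byte picked by software */
	uint32_t fw_index = 0x001234;	/* as reported in create_mkey_out */
	uint32_t key = variant | idx_to_mkey(fw_index);

	assert(base_mkey(key) == idx_to_mkey(fw_index));
	printf("mkey = 0x%08x, base mkey = 0x%08x\n", key, base_mkey(key));
	return 0;
}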
181 struct mlx5_ib_mr *mr; in alloc_cache_mr() local
183 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in alloc_cache_mr()
184 if (!mr) in alloc_cache_mr()
186 mr->cache_ent = ent; in alloc_cache_mr()
196 return mr; in alloc_cache_mr()
203 struct mlx5_ib_mr *mr; in add_keys() local
215 mr = alloc_cache_mr(ent, mkc); in add_keys()
216 if (!mr) { in add_keys()
224 kfree(mr); in add_keys()
229 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, in add_keys()
231 mr->out, sizeof(mr->out), in add_keys()
232 &mr->cb_work); in add_keys()
238 kfree(mr); in add_keys()
251 struct mlx5_ib_mr *mr; in create_cache_mr() local
261 mr = alloc_cache_mr(ent, mkc); in create_cache_mr()
262 if (!mr) { in create_cache_mr()
267 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen); in create_cache_mr()
271 init_waitqueue_head(&mr->mmkey.wait); in create_cache_mr()
272 mr->mmkey.type = MLX5_MKEY_MR; in create_cache_mr()
278 return mr; in create_cache_mr()
280 kfree(mr); in create_cache_mr()
288 struct mlx5_ib_mr *mr; in remove_cache_mr_locked() local
293 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_cache_mr_locked()
294 list_del(&mr->list); in remove_cache_mr_locked()
298 mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key); in remove_cache_mr_locked()
299 kfree(mr); in remove_cache_mr_locked()
575 struct mlx5_ib_mr *mr; in mlx5_mr_cache_alloc() local
589 mr = create_cache_mr(ent); in mlx5_mr_cache_alloc()
590 if (IS_ERR(mr)) in mlx5_mr_cache_alloc()
591 return mr; in mlx5_mr_cache_alloc()
593 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in mlx5_mr_cache_alloc()
594 list_del(&mr->list); in mlx5_mr_cache_alloc()
599 mlx5_clear_mr(mr); in mlx5_mr_cache_alloc()
601 mr->access_flags = access_flags; in mlx5_mr_cache_alloc()
602 return mr; in mlx5_mr_cache_alloc()
608 struct mlx5_ib_mr *mr = NULL; in get_cache_mr() local
613 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in get_cache_mr()
614 list_del(&mr->list); in get_cache_mr()
618 mlx5_clear_mr(mr); in get_cache_mr()
619 return mr; in get_cache_mr()
627 static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in mlx5_mr_cache_free() argument
629 struct mlx5_cache_ent *ent = mr->cache_ent; in mlx5_mr_cache_free()
632 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
643 struct mlx5_ib_mr *mr; in clean_keys() local
653 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
654 list_move(&mr->list, &del_list); in clean_keys()
658 mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in clean_keys()
661 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { in clean_keys()
662 list_del(&mr->list); in clean_keys()
663 kfree(mr); in clean_keys()
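The functions above form the MR cache's fast path: mlx5_mr_cache_alloc()/get_cache_mr() pop a ready MR from the head of the entry's list (falling back to create_cache_mr() when the list is empty), mlx5_mr_cache_free() appends a released MR back onto the list, and clean_keys() drains the list and destroys the mkeys; the cache exists to avoid a firmware mkey-creation round trip on every registration. Below is a minimal single-threaded user-space sketch of that get/put/clean pattern, simplified to a LIFO free list with no locking, unlike the driver's per-entry spinlock and FIFO list_head; all names are invented for illustration.

/* Minimal free-list cache model; not the driver's implementation. */
#include <stdio.h>
#include <stdlib.h>

struct fake_mr {
	unsigned int key;
	struct fake_mr *next;
};

struct cache_ent {
	struct fake_mr *head;		/* free list of ready MRs */
	unsigned int next_key;
};

static struct fake_mr *cache_get(struct cache_ent *ent)
{
	struct fake_mr *mr = ent->head;

	if (mr) {			/* cache hit: reuse a ready MR */
		ent->head = mr->next;
		return mr;
	}
	mr = calloc(1, sizeof(*mr));	/* cache miss: create a fresh one */
	if (mr)
		mr->key = ent->next_key++;
	return mr;
}

static void cache_put(struct cache_ent *ent, struct fake_mr *mr)
{
	mr->next = ent->head;		/* return the MR to the free list */
	ent->head = mr;
}

static void cache_clean(struct cache_ent *ent)
{
	while (ent->head) {		/* destroy everything still cached */
		struct fake_mr *mr = ent->head;

		ent->head = mr->next;
		free(mr);
	}
}

int main(void)
{
	struct cache_ent ent = { .head = NULL, .next_key = 0x100 };
	struct fake_mr *a = cache_get(&ent);
	struct fake_mr *b = cache_get(&ent);

	cache_put(&ent, a);
	cache_put(&ent, b);

	a = cache_get(&ent);		/* reuses b, the most recent free */
	printf("reused mkey 0x%x\n", a->key);
	cache_put(&ent, a);
	cache_clean(&ent);
	return 0;
}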
793 struct mlx5_ib_mr *mr; in mlx5_ib_get_dma_mr() local
798 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dma_mr()
799 if (!mr) in mlx5_ib_get_dma_mr()
815 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
820 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
821 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
822 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
823 mr->umem = NULL; in mlx5_ib_get_dma_mr()
825 return &mr->ibmr; in mlx5_ib_get_dma_mr()
831 kfree(mr); in mlx5_ib_get_dma_mr()
910 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, in set_mr_fields() argument
913 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
914 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
915 mr->ibmr.length = length; in set_mr_fields()
916 mr->ibmr.device = &dev->ib_dev; in set_mr_fields()
917 mr->ibmr.iova = iova; in set_mr_fields()
918 mr->access_flags = access_flags; in set_mr_fields()
938 struct mlx5_ib_mr *mr; in alloc_cacheable_mr() local
957 mr = reg_create(pd, umem, iova, access_flags, page_size, false); in alloc_cacheable_mr()
959 return mr; in alloc_cacheable_mr()
962 mr = get_cache_mr(ent); in alloc_cacheable_mr()
963 if (!mr) { in alloc_cacheable_mr()
964 mr = create_cache_mr(ent); in alloc_cacheable_mr()
969 if (IS_ERR(mr)) in alloc_cacheable_mr()
970 return mr; in alloc_cacheable_mr()
973 mr->ibmr.pd = pd; in alloc_cacheable_mr()
974 mr->umem = umem; in alloc_cacheable_mr()
975 mr->page_shift = order_base_2(page_size); in alloc_cacheable_mr()
976 set_mr_fields(dev, mr, umem->length, access_flags, iova); in alloc_cacheable_mr()
978 return mr; in alloc_cacheable_mr()
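alloc_cacheable_mr() above (and reg_create() further down) records the chosen page size as a shift, mr->page_shift = order_base_2(page_size). For the power-of-two page sizes the driver selects, order_base_2() is simply log2; a quick user-space check of that relationship (non-power-of-two rounding is not handled here):

#include <assert.h>

static inline unsigned int page_shift_of(unsigned long page_size)
{
	/* trailing-zero count equals log2 for a power of two */
	return (unsigned int)__builtin_ctzl(page_size);
}

int main(void)
{
	assert(page_shift_of(4096) == 12);	/* 4 KiB pages */
	assert(page_shift_of(2UL << 20) == 21);	/* 2 MiB pages */
	return 0;
}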
1053 static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr, in mlx5_ib_create_xlt_wr() argument
1058 struct mlx5_ib_dev *dev = mr_to_mdev(mr); in mlx5_ib_create_xlt_wr()
1083 wr->pd = mr->ibmr.pd; in mlx5_ib_create_xlt_wr()
1084 wr->mkey = mr->mmkey.key; in mlx5_ib_create_xlt_wr()
1085 wr->length = mr->ibmr.length; in mlx5_ib_create_xlt_wr()
1086 wr->virt_addr = mr->ibmr.iova; in mlx5_ib_create_xlt_wr()
1087 wr->access_flags = mr->access_flags; in mlx5_ib_create_xlt_wr()
1088 wr->page_shift = mr->page_shift; in mlx5_ib_create_xlt_wr()
1117 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, in mlx5_ib_update_xlt() argument
1120 struct mlx5_ib_dev *dev = mr_to_mdev(mr); in mlx5_ib_update_xlt()
1141 if (WARN_ON(!mr->umem->is_odp)) in mlx5_ib_update_xlt()
1153 xlt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, npages, desc_size, flags); in mlx5_ib_update_xlt()
1160 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in mlx5_ib_update_xlt()
1175 mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags); in mlx5_ib_update_xlt()
1199 int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags) in mlx5_ib_update_mr_pas() argument
1201 struct mlx5_ib_dev *dev = mr_to_mdev(mr); in mlx5_ib_update_mr_pas()
1212 if (WARN_ON(mr->umem->is_odp)) in mlx5_ib_update_mr_pas()
1215 mtt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, in mlx5_ib_update_mr_pas()
1216 ib_umem_num_dma_blocks(mr->umem, in mlx5_ib_update_mr_pas()
1217 1 << mr->page_shift), in mlx5_ib_update_mr_pas()
1224 rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter, in mlx5_ib_update_mr_pas()
1225 mr->umem->sgt_append.sgt.nents, in mlx5_ib_update_mr_pas()
1226 BIT(mr->page_shift)) { in mlx5_ib_update_mr_pas()
1243 if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP)) in mlx5_ib_update_mr_pas()
1273 struct mlx5_ib_mr *mr; in reg_create() local
1283 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in reg_create()
1284 if (!mr) in reg_create()
1287 mr->ibmr.pd = pd; in reg_create()
1288 mr->access_flags = access_flags; in reg_create()
1289 mr->page_shift = order_base_2(page_size); in reg_create()
1306 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, in reg_create()
1324 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1325 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); in reg_create()
1328 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1331 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1336 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1337 mr->umem = umem; in reg_create()
1338 set_mr_fields(dev, mr, umem->length, access_flags, iova); in reg_create()
1341 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1343 return mr; in reg_create()
1348 kfree(mr); in reg_create()
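reg_create() sizes the mkey's translation list with get_octo_len(iova, umem->length, mr->page_shift) (source lines 1324 and 1328). Assuming each 16-byte octword packs two 8-byte MTT entries, a user-space approximation of that sizing is sketched below; treat the exact rounding as an assumption rather than the driver's definition.

#include <assert.h>
#include <stdint.h>

static unsigned int octo_len(uint64_t iova, uint64_t len, unsigned int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t offset = iova & (page_size - 1);	/* start may not be page aligned */
	uint64_t npages = (offset + len + page_size - 1) >> page_shift;

	return (unsigned int)((npages + 1) / 2);	/* two MTT entries per octword */
}

int main(void)
{
	/* 1 MiB region starting 512 bytes into a 4 KiB page: 257 pages, 129 octwords */
	assert(octo_len(0x200, 1 << 20, 12) == 129);
	return 0;
}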
1357 struct mlx5_ib_mr *mr; in mlx5_ib_get_dm_mr() local
1362 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_get_dm_mr()
1363 if (!mr) in mlx5_ib_get_dm_mr()
1379 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1385 set_mr_fields(dev, mr, length, acc, start_addr); in mlx5_ib_get_dm_mr()
1387 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1393 kfree(mr); in mlx5_ib_get_dm_mr()
1450 struct mlx5_ib_mr *mr = NULL; in create_real_mr() local
1456 mr = alloc_cacheable_mr(pd, umem, iova, access_flags); in create_real_mr()
1462 mr = reg_create(pd, umem, iova, access_flags, page_size, true); in create_real_mr()
1465 if (IS_ERR(mr)) { in create_real_mr()
1467 return ERR_CAST(mr); in create_real_mr()
1470 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in create_real_mr()
1480 err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE); in create_real_mr()
1482 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_real_mr()
1486 return &mr->ibmr; in create_real_mr()
1495 struct mlx5_ib_mr *mr; in create_user_odp_mr() local
1510 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); in create_user_odp_mr()
1511 if (IS_ERR(mr)) in create_user_odp_mr()
1512 return ERR_CAST(mr); in create_user_odp_mr()
1513 return &mr->ibmr; in create_user_odp_mr()
1525 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); in create_user_odp_mr()
1526 if (IS_ERR(mr)) { in create_user_odp_mr()
1528 return ERR_CAST(mr); in create_user_odp_mr()
1530 xa_init(&mr->implicit_children); in create_user_odp_mr()
1532 odp->private = mr; in create_user_odp_mr()
1533 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in create_user_odp_mr()
1537 err = mlx5_ib_init_odp_mr(mr); in create_user_odp_mr()
1540 return &mr->ibmr; in create_user_odp_mr()
1543 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_user_odp_mr()
1572 struct mlx5_ib_mr *mr = umem_dmabuf->private; in mlx5_ib_dmabuf_invalidate_cb() local
1579 mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); in mlx5_ib_dmabuf_invalidate_cb()
1594 struct mlx5_ib_mr *mr = NULL; in mlx5_ib_reg_user_mr_dmabuf() local
1619 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, in mlx5_ib_reg_user_mr_dmabuf()
1621 if (IS_ERR(mr)) { in mlx5_ib_reg_user_mr_dmabuf()
1623 return ERR_CAST(mr); in mlx5_ib_reg_user_mr_dmabuf()
1626 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in mlx5_ib_reg_user_mr_dmabuf()
1628 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); in mlx5_ib_reg_user_mr_dmabuf()
1629 umem_dmabuf->private = mr; in mlx5_ib_reg_user_mr_dmabuf()
1630 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in mlx5_ib_reg_user_mr_dmabuf()
1634 err = mlx5_ib_init_dmabuf_mr(mr); in mlx5_ib_reg_user_mr_dmabuf()
1637 return &mr->ibmr; in mlx5_ib_reg_user_mr_dmabuf()
1640 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in mlx5_ib_reg_user_mr_dmabuf()
1652 static int revoke_mr(struct mlx5_ib_mr *mr) in revoke_mr() argument
1656 if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) in revoke_mr()
1662 umrwr.pd = mr_to_mdev(mr)->umrc.pd; in revoke_mr()
1663 umrwr.mkey = mr->mmkey.key; in revoke_mr()
1666 return mlx5_ib_post_send_wait(mr_to_mdev(mr), &umrwr); in revoke_mr()
1686 static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd, in umr_rereg_pd_access() argument
1689 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pd_access()
1696 .mkey = mr->mmkey.key, in umr_rereg_pd_access()
1706 mr->access_flags = access_flags; in umr_rereg_pd_access()
1710 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr, in can_use_umr_rereg_pas() argument
1715 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in can_use_umr_rereg_pas()
1718 if (!mr->cache_ent) in can_use_umr_rereg_pas()
1727 return (1ULL << mr->cache_ent->order) >= in can_use_umr_rereg_pas()
1731 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, in umr_rereg_pas() argument
1735 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pas()
1737 struct ib_umem *old_umem = mr->umem; in umr_rereg_pas()
1745 err = revoke_mr(mr); in umr_rereg_pas()
1750 mr->ibmr.pd = pd; in umr_rereg_pas()
1754 mr->access_flags = access_flags; in umr_rereg_pas()
1758 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1759 mr->ibmr.iova = iova; in umr_rereg_pas()
1760 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1761 mr->page_shift = order_base_2(page_size); in umr_rereg_pas()
1762 mr->umem = new_umem; in umr_rereg_pas()
1763 err = mlx5_ib_update_mr_pas(mr, upd_flags); in umr_rereg_pas()
1769 mr->umem = old_umem; in umr_rereg_pas()
1785 struct mlx5_ib_mr *mr = to_mmr(ib_mr); in mlx5_ib_rereg_user_mr() local
1800 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1808 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1810 err = umr_rereg_pd_access(mr, new_pd, new_access_flags); in mlx5_ib_rereg_user_mr()
1816 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1823 err = revoke_mr(mr); in mlx5_ib_rereg_user_mr()
1826 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1827 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1830 return create_real_mr(new_pd, umem, mr->ibmr.iova, in mlx5_ib_rereg_user_mr()
1838 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1842 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
1852 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova, in mlx5_ib_rereg_user_mr()
1854 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags, in mlx5_ib_rereg_user_mr()
1876 struct mlx5_ib_mr *mr, in mlx5_alloc_priv_descs() argument
1888 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1889 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1892 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1894 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); in mlx5_alloc_priv_descs()
1895 if (dma_mapping_error(ddev, mr->desc_map)) { in mlx5_alloc_priv_descs()
1902 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
1908 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) in mlx5_free_priv_descs() argument
1910 if (!mr->umem && mr->descs) { in mlx5_free_priv_descs()
1911 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1912 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1915 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, in mlx5_free_priv_descs()
1917 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1918 mr->descs = NULL; in mlx5_free_priv_descs()
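mlx5_alloc_priv_descs() over-allocates the descriptor buffer, keeps the raw pointer in mr->descs_alloc for the eventual kfree(), and rounds the working pointer mr->descs up to MLX5_UMR_ALIGN before DMA-mapping it (source lines 1888-1894). A stand-alone sketch of that over-allocate-and-align pattern, with 2048 assumed as the alignment value and the dma_map_single() step left out:

#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

#define UMR_ALIGN 2048u			/* assumed alignment, for illustration */

struct priv_descs {
	void *descs_alloc;		/* raw pointer handed back to free() */
	void *descs;			/* aligned pointer used for the HW list */
};

static int alloc_priv_descs(struct priv_descs *p, size_t size)
{
	/* enough slack so the aligned pointer plus 'size' stays in bounds */
	size_t add_size = UMR_ALIGN;

	p->descs_alloc = calloc(1, size + add_size);
	if (!p->descs_alloc)
		return -1;

	/* round up to the next UMR_ALIGN boundary, like PTR_ALIGN() */
	p->descs = (void *)(((uintptr_t)p->descs_alloc + UMR_ALIGN - 1) &
			    ~(uintptr_t)(UMR_ALIGN - 1));
	return 0;
}

int main(void)
{
	struct priv_descs p;

	assert(alloc_priv_descs(&p, 4096) == 0);
	assert(((uintptr_t)p.descs & (UMR_ALIGN - 1)) == 0);
	free(p.descs_alloc);
	return 0;
}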
1924 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_dereg_mr() local
1934 refcount_read(&mr->mmkey.usecount) != 0 && in mlx5_ib_dereg_mr()
1935 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) in mlx5_ib_dereg_mr()
1936 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in mlx5_ib_dereg_mr()
1939 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_ib_dereg_mr()
1940 mr->sig, NULL, GFP_KERNEL); in mlx5_ib_dereg_mr()
1942 if (mr->mtt_mr) { in mlx5_ib_dereg_mr()
1943 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1946 mr->mtt_mr = NULL; in mlx5_ib_dereg_mr()
1948 if (mr->klm_mr) { in mlx5_ib_dereg_mr()
1949 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1952 mr->klm_mr = NULL; in mlx5_ib_dereg_mr()
1956 mr->sig->psv_memory.psv_idx)) in mlx5_ib_dereg_mr()
1958 mr->sig->psv_memory.psv_idx); in mlx5_ib_dereg_mr()
1959 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_ib_dereg_mr()
1961 mr->sig->psv_wire.psv_idx); in mlx5_ib_dereg_mr()
1962 kfree(mr->sig); in mlx5_ib_dereg_mr()
1963 mr->sig = NULL; in mlx5_ib_dereg_mr()
1967 if (mr->cache_ent) { in mlx5_ib_dereg_mr()
1968 if (revoke_mr(mr)) { in mlx5_ib_dereg_mr()
1969 spin_lock_irq(&mr->cache_ent->lock); in mlx5_ib_dereg_mr()
1970 mr->cache_ent->total_mrs--; in mlx5_ib_dereg_mr()
1971 spin_unlock_irq(&mr->cache_ent->lock); in mlx5_ib_dereg_mr()
1972 mr->cache_ent = NULL; in mlx5_ib_dereg_mr()
1975 if (!mr->cache_ent) { in mlx5_ib_dereg_mr()
1976 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); in mlx5_ib_dereg_mr()
1981 if (mr->umem) { in mlx5_ib_dereg_mr()
1982 bool is_odp = is_odp_mr(mr); in mlx5_ib_dereg_mr()
1985 atomic_sub(ib_umem_num_pages(mr->umem), in mlx5_ib_dereg_mr()
1987 ib_umem_release(mr->umem); in mlx5_ib_dereg_mr()
1989 mlx5_ib_free_odp_mr(mr); in mlx5_ib_dereg_mr()
1992 if (mr->cache_ent) { in mlx5_ib_dereg_mr()
1993 mlx5_mr_cache_free(dev, mr); in mlx5_ib_dereg_mr()
1995 mlx5_free_priv_descs(mr); in mlx5_ib_dereg_mr()
1996 kfree(mr); in mlx5_ib_dereg_mr()
2018 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in _mlx5_alloc_mkey_descs() argument
2025 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
2026 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
2027 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
2029 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
2035 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
2039 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
2040 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2041 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2046 mlx5_free_priv_descs(mr); in _mlx5_alloc_mkey_descs()
2057 struct mlx5_ib_mr *mr; in mlx5_ib_alloc_pi_mr() local
2061 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx5_ib_alloc_pi_mr()
2062 if (!mr) in mlx5_ib_alloc_pi_mr()
2065 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
2066 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
2077 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, in mlx5_ib_alloc_pi_mr()
2082 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
2085 return mr; in mlx5_ib_alloc_pi_mr()
2090 kfree(mr); in mlx5_ib_alloc_pi_mr()
2094 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_mem_reg_descs() argument
2097 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), in mlx5_alloc_mem_reg_descs()
2102 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_sg_gaps_descs() argument
2105 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), in mlx5_alloc_sg_gaps_descs()
2109 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, in mlx5_alloc_integrity_descs() argument
2118 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
2119 if (!mr->sig) in mlx5_alloc_integrity_descs()
2127 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
2128 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
2130 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
2131 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
2133 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
2134 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2137 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
2138 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
2141 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2144 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
2145 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
2154 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, in mlx5_alloc_integrity_descs()
2159 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
2160 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
2166 destroy_mkey(dev, mr); in mlx5_alloc_integrity_descs()
2167 mlx5_free_priv_descs(mr); in mlx5_alloc_integrity_descs()
2169 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2170 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
2172 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2173 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
2175 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
2177 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
2178 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
2180 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
2182 kfree(mr->sig); in mlx5_alloc_integrity_descs()
2194 struct mlx5_ib_mr *mr; in __mlx5_ib_alloc_mr() local
2198 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in __mlx5_ib_alloc_mr()
2199 if (!mr) in __mlx5_ib_alloc_mr()
2208 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
2209 mr->umem = NULL; in __mlx5_ib_alloc_mr()
2213 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2216 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); in __mlx5_ib_alloc_mr()
2219 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, in __mlx5_ib_alloc_mr()
2232 return &mr->ibmr; in __mlx5_ib_alloc_mr()
2237 kfree(mr); in __mlx5_ib_alloc_mr()
2395 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_pa_mr_sg_pi() local
2399 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2402 mr->mmkey.ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2405 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2406 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2409 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2414 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2415 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2417 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2424 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, in mlx5_ib_sg_to_klms() argument
2433 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2435 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2438 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2439 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2442 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2447 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2455 mr->mmkey.ndescs = i; in mlx5_ib_sg_to_klms()
2456 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2462 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2469 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2476 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2477 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
2485 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page() local
2488 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) in mlx5_set_page()
2491 descs = mr->descs; in mlx5_set_page()
2492 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
2499 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_set_page_pi() local
2502 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2505 descs = mr->descs; in mlx5_set_page_pi()
2506 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
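mlx5_set_page() and mlx5_set_page_pi() above fill the next free descriptor slot with the big-endian DMA address of a page, OR-ing read/write enable bits into the low bits that a page-aligned address leaves free. A user-space sketch of that packing; the permission bit values are placeholders rather than the driver's MLX5_EN_RD/MLX5_EN_WR definitions.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define EN_RD	0x1ULL			/* illustrative read-enable bit */
#define EN_WR	0x2ULL			/* illustrative write-enable bit */

struct fake_mr {
	uint64_t descs[8];		/* big-endian MTT-style entries */
	unsigned int ndescs;
	unsigned int max_descs;
};

static int set_page(struct fake_mr *mr, uint64_t dma_addr)
{
	if (mr->ndescs == mr->max_descs)
		return -1;		/* translation table is full */

	/* page-aligned address; low bits reused for permissions */
	mr->descs[mr->ndescs++] = htobe64(dma_addr | EN_RD | EN_WR);
	return 0;
}

int main(void)
{
	struct fake_mr mr = { .max_descs = 8 };

	set_page(&mr, 0x12345000);
	printf("desc[0] = 0x%016llx\n",
	       (unsigned long long)be64toh(mr.descs[0]));
	return 0;
}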
2518 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mtt_mr_sg_pi() local
2519 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2583 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_klm_mr_sg_pi() local
2584 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2616 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg_pi() local
2622 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2623 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2624 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2625 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2626 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2646 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2653 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2663 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2667 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2675 struct mlx5_ib_mr *mr = to_mmr(ibmr); in mlx5_ib_map_mr_sg() local
2678 mr->mmkey.ndescs = 0; in mlx5_ib_map_mr_sg()
2680 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2681 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2684 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2685 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, in mlx5_ib_map_mr_sg()
2691 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2692 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()