/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO	    (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL	    (1 <<  9)
#define MLX4_MPT_FLAG_REGION	    (1 <<  8)

#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)

#define MLX4_MPT_STATUS_SW	    0xF0
#define MLX4_MPT_STATUS_HW	    0x00
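
/*
 * MTT ranges are handed out by a binary buddy allocator: bits[o] is a
 * bitmap of free blocks of order o (a block of order o covers 1 << o
 * segments), and num_free[o] counts them.  Splitting and merging
 * follow the usual buddy rule: block n's buddy at the same order is
 * n ^ 1.
 */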
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
	if (seg == -1)
		return -1;

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
				 seg + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
		return -1;
	}

	return seg;
}
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}
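
/*
 * This alloc/free pairing repeats throughout the file: on a
 * multi-function (SR-IOV) device, resource management is centralized
 * at the master, so slaves issue wrapped MLX4_CMD_ALLOC_RES /
 * MLX4_CMD_FREE_RES commands instead of touching the bitmaps and ICM
 * tables directly; only the native path calls the double-underscore
 * helpers.
 */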
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		/* Physical region: no translation entries needed */
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
		++mtt->order;

	mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->first_seg == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
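
/*
 * Worked example (values assumed, not taken from any particular HCA):
 * with mtts_per_seg = 8, a 1024-page region needs 1024 / 8 = 128
 * segments, so the loop above settles on order = 7 (1 << 7 = 128
 * segments, i.e. 1024 MTT entries).
 */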
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg,
			   int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg,
			     first_seg + (1 << order) - 1);
}
static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order)
{
	u64 in_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, first_seg);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  first_seg, order);
		return;
	}
	__mlx4_free_mtt_range(dev, first_seg, order);
}
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->first_seg, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
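
/*
 * The two helpers above are 8-bit rotations of one another: the MPT
 * index lands in the upper 24 bits of the key, with the remaining
 * byte wrapped into the low byte.  E.g. hw_index_to_key(0x000012ab)
 * yields 0x0012ab00, and key_to_hw_index() rotates it straight back.
 */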
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma | dev->caps.function, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
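
/*
 * SW2HW_MPT hands a software-built MPT entry to the firmware (the
 * region becomes usable for posted work requests); HW2SW_MPT takes
 * ownership back so software may modify or free it.  Passing a NULL
 * mailbox to HW2SW_MPT sets the opcode modifier, telling the firmware
 * not to write the entry back.
 */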
static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
				 u32 *base_mridx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 mridx;

	mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
	if (mridx == -1)
		return -ENOMEM;

	*base_mridx = mridx;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);

static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd	    = pd;
	mr->access  = access;
	mr->enabled = MLX4_MR_DISABLED;
	mr->key	    = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
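
/*
 * WRITE_MTT takes its input through the mailbox: inbox[0] holds the
 * starting MTT index, inbox[1] is reserved, and the page addresses
 * follow from inbox[2] on.  mlx4_write_mtt() below builds this layout
 * for the multi-function case.
 */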
int __mlx4_mr_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}
static int mlx4_mr_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mr_reserve(dev);
}
void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}
static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mr_release(dev, index);
}
int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mr_alloc_icm(dev, index);
}
void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}
static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mr_free_icm(dev, index);
}
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mr_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mr_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
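
/*
 * MR life cycle: mlx4_mr_alloc() leaves the region in MLX4_MR_DISABLED;
 * mlx4_mr_enable() allocates the ICM backing and moves it to
 * MLX4_MR_EN_HW via SW2HW_MPT; freeing goes back through HW2SW_MPT
 * (MLX4_MR_EN_SW) before the key and ICM pages are released.
 */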
static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MR_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mr->enabled = MLX4_MR_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mr_free_reserved(dev, mr);
	if (mr->enabled)
		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
				       MLX4_MPT_FLAG_REGION	 |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_seg = 0;
	} else {
		mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
						   dev->caps.mtts_per_seg);
	} else {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MR_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;
	int s = start_index * sizeof (u64);

	/* All MTTs must fit in the same page */
	if (start_index / (PAGE_SIZE / sizeof (u64)) !=
	    (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
		return -EINVAL;

	if (start_index & (dev->caps.mtts_per_seg - 1))
		return -EINVAL;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
			       s / dev->caps.mtt_entry_sz, &dma_handle);
	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}
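
/*
 * The dma_sync_single_for_cpu()/_for_device() pair above brackets CPU
 * writes into the DMA-mapped MTT page so that the update is visible
 * to the HCA on non-coherent platforms; on cache-coherent ones these
 * calls are effectively no-ops.
 */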
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;

	while (npages > 0) {
		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;

		npages	    -= chunk;
		start_index += chunk;
		page_list   += chunk;
	}
	return err;
}
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			int s = mtt->first_seg * dev->caps.mtts_per_seg +
				start_index;
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) -
				      dev->caps.mtts_per_seg, npages);
			if (s / (PAGE_SIZE / sizeof(u64)) !=
			    (s + chunk - 1) / (PAGE_SIZE / sizeof(u64)))
				chunk = PAGE_SIZE / sizeof(u64) -
					(s % (PAGE_SIZE / sizeof(u64)));

			inbox[0] = cpu_to_be64(mtt->first_seg *
					       dev->caps.mtts_per_seg +
					       start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages	    -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
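
/*
 * Typical caller sequence (a sketch; npages, page_shift and page_list
 * stand for whatever the consumer already has on hand):
 *
 *	struct mlx4_mtt mtt;
 *
 *	err = mlx4_mtt_init(dev, npages, page_shift, &mtt);
 *	if (!err)
 *		err = mlx4_write_mtt(dev, &mtt, 0, npages, page_list);
 *	...
 *	mlx4_mtt_cleanup(dev, &mtt);
 *
 * mlx4_buf_write_mtt() below performs exactly this address-list
 * construction for a driver-allocated mlx4_buf.
 */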
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2(dev->caps.num_mtt_segs));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %d is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}
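
/*
 * FMR remap protocol, as implemented below: put the MPT into software
 * ownership (MLX4_MPT_STATUS_SW), rewrite the cached MTT entries and
 * MPT fields directly, then flip the status byte back to
 * MLX4_MPT_STATUS_HW, with memory barriers ordering the steps.  The
 * MPT index is advanced by num_mpts on every map, so each remap
 * yields a fresh key and stale keys from earlier maps are likely to
 * be caught by the HCA.
 */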
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key	 = cpu_to_be32(key);
	fmr->mpt->lkey	 = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start	 = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
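
/*
 * FMR setup below: mlx4_fmr_alloc() reserves the MR and looks up the
 * kernel-mapped MTT page (fmr->mtts) so that later maps can write
 * entries without a firmware command; mlx4_fmr_enable() does the same
 * for the MPT entry (fmr->mpt).  This is what makes
 * mlx4_map_phys_fmr() above cheap enough for per-I/O use.
 */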
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 mtt_seg;
	int err = -ENOMEM;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages	= max_pages;
	fmr->max_maps	= max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.first_seg,
				    &fmr->dma_handle);
	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
				   u32 pd, u32 access, int max_pages,
				   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages	= max_pages;
	fmr->max_maps	= max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
				     page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.first_seg,
				    &fmr->dma_handle);
	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free_reserved(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
		       err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
		       err);
		return;
	}
	fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mlx4_mr_free(dev, &fmr->mr);
	fmr->mr.enabled = MLX4_MR_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mlx4_mr_free_reserved(dev, &fmr->mr);
	fmr->mr.enabled = MLX4_MR_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);