/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
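
/*
 * Overview: this file implements the PF-side resource tracker used when
 * mlx4 runs in SR-IOV (multi-function) mode.  Every HW resource a slave
 * (VF) obtains through the paravirtualized command channel -- QPs, CQs,
 * SRQs, MPTs/MTTs, EQs, counters, XRC domains, MACs and flow steering
 * rules -- is recorded here so that ownership can be enforced on every
 * wrapped FW command, and so that everything can be reclaimed if a
 * slave dies without cleaning up after itself.
 */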

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
};
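
/*
 * Tracker data model: every allocated resource is wrapped in a small
 * tracker object that embeds struct res_common.  Each object lives in
 * two places at once -- a per-type red-black tree keyed by res_id (for
 * fast lookup when a wrapped command names a resource) and a per-slave
 * linked list (so one slave's resources can be walked and reclaimed).
 * Both structures are protected by the tracker spinlock, mlx4_tlock(dev).
 */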

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}

	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
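
/*
 * get_res()/put_res() implement a simple busy-bit protocol on top of
 * the tracker lock: get_res() verifies that the calling slave owns the
 * entry, saves the current state in from_state and parks the entry in
 * RES_ANY_BUSY so concurrent commands and the cleanup path keep their
 * hands off it; put_res() restores the saved state.
 */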

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
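
/*
 * add_res_range(): allocate tracker entries for a contiguous range of
 * 'count' resource ids starting at 'base', insert them into the
 * per-type tree and append them to the owning slave's list.  The whole
 * range is rolled back if any id already exists in the tree.
 */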

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* unwind only the entries inserted so far */
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
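
/*
 * The *_res_start_move_to() helpers below implement guarded state
 * transitions: under the tracker lock they validate ownership and that
 * the requested transition is legal from the current state, then park
 * the entry in the type's BUSY state with from_state/to_state recording
 * the edge.  The caller performs the actual FW command and then commits
 * with res_end_move() or rolls back with res_abort_move().
 */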

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
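
/*
 * Resource allocation on behalf of a slave happens in the phases the
 * slave requests through ALLOC_RES: RES_OP_RESERVE hands out a range of
 * ids, and RES_OP_MAP_ICM later backs an id with ICM memory.  QPs from
 * the FW-reserved range are special-cased: they are registered in the
 * tracker on first MAP_ICM, and their ICM is never explicitly
 * allocated or freed here.
 */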

static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
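
/*
 * mlx4_ALLOC_RES_wrapper() is installed in the master's command table:
 * when a slave issues an ALLOC_RES command, the resource type arrives
 * in vhcr->in_modifier, the sub-operation (reserve/map) in
 * vhcr->op_modifier, and the result travels back via vhcr->out_param.
 */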

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
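
/*
 * check_mtt_range(): a context the slave submits (QP/CQ/SRQ/EQ/MPT)
 * names an MTT window by base entry and size; the window is only legal
 * if it lies entirely inside an MTT range the slave actually owns.  The
 * *_get_mtt_size() helpers recompute the number of MTT pages a context
 * needs from its own log-size/stride/page-shift fields, so a slave
 * cannot claim a smaller window than the HW will really walk.
 */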

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
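
/*
 * WRITE_MTT mailbox layout: page_list[0] holds the index of the first
 * MTT entry to write and page_list[1] is reserved; the page addresses
 * start at page_list[2].  The wrapper below validates that the whole
 * window falls inside an MTT range owned by the slave before writing
 * the entries on its behalf.
 */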

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
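
/*
 * mlx4_GEN_EQE(): deliver an asynchronous event to a slave by asking FW
 * to inject an EQE into the EQ the slave registered for that event
 * type.  The EQ is looked up under its namespaced id ((slave << 8) |
 * eqn) and held busy via get_res() so it cannot be torn down while the
 * event is being generated.
 */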

int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
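
/*
 * Multicast attachments are tracked per QP on rqp->mcg_list (protected
 * by rqp->mcg_spl) so detach requests can be validated against what was
 * actually attached, and so detach_qp() can drop any attachments a
 * dying slave left behind.
 */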

static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return 0;

ex_rem:
	/* ignore error return below, already in error */
	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}

int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
	}
	return err;
}

int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
		return err;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					     rgid->steer);
		list_del(&rgid->list);
		kfree(rgid);
	}
}

static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
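
/*
 * The rem_slave_*() functions below reclaim everything a slave still
 * owns when it shuts down or crashes.  Each one first parks the slave's
 * entries in BUSY via move_all_busy(), then walks the per-slave list
 * and runs each resource's state machine backwards (e.g. RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED), issuing the same FW commands and
 * ICM frees a well-behaved slave would have issued itself.
 */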

static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
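
/*
 * Teardown order matters: QPs are detached and reset first so their
 * references on CQs, SRQs and MTTs drop; MRs and EQs follow, and MTTs
 * are only freed once nothing can still be pointing at them, so no
 * reference counter is elevated when its resource goes away.
 */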

void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	/*VLAN*/
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	rem_slave_fs_rule(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}