/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
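
/* Opcode modifiers for the MANAGE_PAGES command: the driver either gives
 * pages to the FW, takes (reclaims) them back, or tells the FW that it
 * cannot satisfy a page request.
 */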
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u32			func_id;
	s16			npages;
	struct work_struct	work;
};
struct fw_page {
	struct rb_node	rb_node;
	u64		addr;
	struct page    *page;
	u16		func_id;
};
struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};
struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	__be16			num_boot_pages;
	__be16			func_id;
	__be16			init_pages;
	__be16			num_pages;
};
struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd0;
	__be16			func_id;
	__be16			rsvd1;
	__be16			num_entries;
	u8			rsvd2[16];
	__be64			pas[0];
};
struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[2];
	__be16			num_entries;
	u8			rsvd1[20];
	__be64			pas[0];
};
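
/* Pages given to the FW are tracked in an rb-tree keyed by their DMA
 * address, so they can be looked up, unmapped and freed when the FW
 * returns them.
 */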
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp, *tfp;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;	/* address already tracked */
	}

	nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);

	return 0;
}
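
/* Look up a page by its DMA address, detach it from the tree and hand it
 * back to the caller; returns NULL if the address is not tracked.
 */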
static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			rb_erase(&tfp->rb_node, root);
			result = tfp->page;
			kfree(tfp);
			break;
		}
	}

	return result;
}
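
/* Query the FW for the pages it needs: pages for normal operation, init
 * pages, boot pages, and the function id they should be accounted to.
 * Output pointers may be NULL if the caller does not need a value.
 */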
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s16 *pages, s16 *init_pages, u16 *boot_pages)
{
	struct mlx5_query_pages_inbox in;
	struct mlx5_query_pages_outbox out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	if (pages)
		*pages = be16_to_cpu(out.num_pages);
	if (init_pages)
		*init_pages = be16_to_cpu(out.init_pages);
	if (boot_pages)
		*boot_pages = be16_to_cpu(out.num_boot_pages);
	if (func_id)
		*func_id = be16_to_cpu(out.func_id);

	return 0;
}
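
/* Allocate npages pages, DMA-map them and post them to the FW with a
 * MANAGE_PAGES/GIVE command. On failure everything is unwound and, if
 * notify_fail is set, the FW is told the request cannot be satisfied.
 */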
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct page *page;
	int inlen, err, i;
	u64 addr;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		return -ENOMEM;
	}
	memset(&out, 0, sizeof(out));

	for (i = 0; i < npages; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page) {
			err = -ENOMEM;
			mlx5_core_warn(dev, "failed to allocate page\n");
			goto out_alloc;
		}
		addr = dma_map_page(&dev->pdev->dev, page, 0,
				    PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&dev->pdev->dev, addr)) {
			mlx5_core_warn(dev, "failed dma mapping page\n");
			__free_page(page);
			err = -ENOMEM;
			goto out_alloc;
		}
		err = insert_page(dev, addr, page, func_id);
		if (err) {
			mlx5_core_err(dev, "failed to track allocated page\n");
			dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
			__free_page(page);
			goto out_alloc;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be16(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	mlx5_core_dbg(dev, "err %d\n", err);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
		goto out_alloc;
	}
	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
			       func_id, npages, out.hdr.status);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;
	goto out_free;

out_alloc:
	/* Unwind before the failure notification below: the notification
	 * reuses and zeroes "in", which still holds the addresses of the
	 * pages that must be untracked, unmapped and freed here.
	 */
	for (i--; i >= 0; i--) {
		addr = be64_to_cpu(in->pas[i]);
		page = remove_page(dev, addr);
		if (!page) {
			mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
				      addr);
			continue;
		}
		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(page);
	}
	if (notify_fail) {
		memset(in, 0, inlen);
		memset(&out, 0, sizeof(out));
		in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
		in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
		if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)))
			mlx5_core_warn(dev, "failed to notify FW of alloc failure\n");
	}

out_free:
	mlx5_vfree(in);
	return err;
}
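
/* Ask the FW to return npages pages with a MANAGE_PAGES/TAKE command,
 * then unmap and free every page whose address the FW reports back.
 */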
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox in;
	struct mlx5_manage_pages_outbox *out;
	struct page *page;
	int num_claimed, outlen, err, i;
	u64 addr;

	memset(&in, 0, sizeof(in));
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be16(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	/* The FW may return fewer pages than requested; account only for
	 * what it actually handed back.
	 */
	num_claimed = be16_to_cpu(out->num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;
	dev->priv.fw_pages -= num_claimed;

	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		page = remove_page(dev, addr);
		if (!page) {
			mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr);
			continue;
		}
		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(page);
	}

out_free:
	mlx5_vfree(out);
	return err;
}
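
/* Work item handler: a negative page count from the FW means "reclaim",
 * a positive one means "give".
 */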
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
			       "reclaim" : "give", err);

	kfree(req);
}
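
/* Called when the FW raises a page request event; allocates atomically
 * and defers the actual work to the page-allocator workqueue.
 */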
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s16 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
}
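
/* Give the FW the pages it asks for at start-up: boot pages when "boot"
 * is set, init pages otherwise.
 */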
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(boot_pages);
	s16 uninitialized_var(init_pages);
	u16 uninitialized_var(func_id);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
				   &boot_pages);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
		      init_pages, boot_pages, func_id);
	return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
}
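
/* Largest number of page addresses that fits in the command's inline
 * input buffer plus one protection block, at 8 bytes per address.
 */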
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->in) + sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) / 8;

	return ret;
}
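
/* At teardown, keep reclaiming in optimally sized chunks until the tree
 * of FW pages is empty, giving up after a five second deadline.
 */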
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(5000);
	struct fw_page *fwp;
	struct rb_node *p;
	int err;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(), NULL);
			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
				return err;
			}
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages, giving up\n");
			break;
		}
	} while (p);

	return 0;
}
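
/* Initialize the tree used to track pages given to the FW. */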
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing to free here; pages are returned to the FW by
	 * mlx5_reclaim_startup_pages()
	 */
}
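
/* A single-threaded workqueue serializes the FW page requests. */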
int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}