/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux_compat.h>
#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in chunks as big as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
};

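/*
 * Tear down one chunk of a non-coherent ICM allocation: unmap its
 * scatterlist from the device (if it was mapped) and hand each
 * contiguous page run back to the allocator.
 */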
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		free_cont_pages(page2kva(sg_page(&chunk->mem[i])),
				get_order(chunk->mem[i].length));
}

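/*
 * Tear down one chunk of a coherent ICM allocation; each scatterlist
 * entry came from dma_alloc_coherent(), so release it with
 * dma_free_coherent().
 */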
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->persist->pdev->linux_dev,
				  chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}

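/*
 * Free a whole ICM allocation: release every chunk according to how it
 * was allocated, then free the chunk and icm bookkeeping structures.
 */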
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

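/*
 * Fill one scatterlist entry with 2^order pages for a non-coherent
 * chunk, trying the requested NUMA node first and falling back to the
 * contiguous-page allocator.
 */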
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
				gfp_t gfp_mask, int node)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page) {
		page = kva2page(get_cont_pages(order, gfp_mask));
		if (!page)
			return -ENOMEM;
	}

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

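/*
 * Fill one scatterlist entry with a 2^order-page coherent DMA buffer.
 * The buffer is page-aligned, so the entry's offset must be zero, and
 * its DMA length is recorded by hand since coherent chunks never go
 * through pci_map_sg().
 */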
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	assert(!(mem->offset));
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

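/*
 * Allocate npages of ICM.  The allocation is built as a list of chunks,
 * each holding up to MLX4_ICM_CHUNK_LEN scatterlist entries, where
 * every entry covers a power-of-two run of pages.  When a high-order
 * allocation fails, the order is lowered and the allocation retried, so
 * memory pressure degrades the allocation into smaller pieces rather
 * than failing outright.  Non-coherent chunks are DMA-mapped as they
 * fill up; coherent entries are already mapped by dma_alloc_coherent().
 */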
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	assert(!(coherent && (gfp_mask & __GFP_HIGHMEM)));

	icm = kmalloc_node(sizeof(*icm),
			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
			   dev->numa_node);
	if (!icm) {
		icm = kmalloc(sizeof(*icm),
			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
		if (!icm)
			return NULL;
	}

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc_node(sizeof(*chunk),
					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
					     dev->numa_node);
			if (!chunk) {
				chunk = kmalloc(sizeof(*chunk),
						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
				if (!chunk)
					goto fail;
			}

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
#if 0 // AKAROS_PORT
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
#else
			ret = mlx4_alloc_icm_coherent(0,
#endif
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask,
						   dev->numa_node);

		if (ret) {
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

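/*
 * Thin wrappers around the MAP_ICM/UNMAP_ICM firmware commands, which
 * attach and detach host memory at a given device virtual address.
 * The _AUX variants below do the same for the firmware's auxiliary ICM
 * area, which has no caller-chosen virtual address.
 */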
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm,
			uint64_t virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, uint64_t virt,
			  uint32_t page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

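/*
 * Make sure the ICM chunk backing object 'obj' is allocated and mapped,
 * and take a reference on it.  Chunks are MLX4_TABLE_CHUNK_SIZE bytes,
 * so one chunk typically backs many objects; only the first user of a
 * chunk pays the allocation and MAP_ICM cost.
 */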
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table,
		   uint32_t obj,
		   gfp_t gfp)
{
	uint32_t i = (obj & (table->num_obj - 1)) /
			(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	qlock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? gfp : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (uint64_t) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	qunlock(&table->mutex);
	return ret;
}

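/*
 * Drop a reference on the chunk backing object 'obj'; when the last
 * reference goes away, unmap the chunk from the device and free it.
 */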
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table,
		    uint32_t obj)
{
	uint32_t i;
	uint64_t offset;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	qlock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		offset = (uint64_t) i * MLX4_TABLE_CHUNK_SIZE;
		mlx4_UNMAP_ICM(dev, table->virt + offset,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	qunlock(&table->mutex);
}

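/*
 * Return the kernel virtual address of object 'obj' within a lowmem
 * table, and optionally its bus address via dma_handle.  Walks the
 * chunk's scatterlist to find the entry covering the object's byte
 * offset within the chunk.
 */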
void *mlx4_table_find(struct mlx4_icm_table *table, uint32_t obj,
		      dma_addr_t *dma_handle)
{
	int offset, dma_offset, i;
	uint64_t idx;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	qlock(&table->mutex);

	idx = (uint64_t) (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	qunlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

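/*
 * Take a reference on every chunk backing objects [start, end],
 * unwinding the references already taken if any allocation fails.
 */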
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 uint32_t start, uint32_t end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int err;
	uint32_t i;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i, MEM_WAIT);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

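/*
 * Release the per-chunk references taken by mlx4_table_get_range().
 */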
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  uint32_t start, uint32_t end)
{
	uint32_t i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

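/*
 * Initialize an ICM table that maps 'nobj' objects of 'obj_size' bytes
 * starting at device virtual address 'virt'.  Chunks covering the first
 * 'reserved' objects (firmware-owned) are allocated and mapped up
 * front, and given an extra reference so they are never freed.
 */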
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			uint64_t virt, int obj_size, uint32_t nobj,
			int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;
	uint64_t size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kzmalloc(num_icm * sizeof(*table->icm),
				   MEM_WAIT);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	qlock_init(&table->mutex);

	size = (uint64_t) nobj * obj_size;
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
			chunk_size = PAGE_ALIGN(size -
					i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? MEM_WAIT : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	kfree(table->icm);

	return -ENOMEM;
}

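/*
 * Undo mlx4_init_icm_table(): unmap and free every remaining chunk,
 * then free the chunk-pointer array.
 */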
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table->icm);
}