1 /* Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
2 * Portions Copyright © 1997-1999 Vita Nuova Limited
3 * Portions Copyright © 2000-2007 Vita Nuova Holdings Limited
5 * Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
7 * Modified for the Akaros operating system:
8 * Copyright (c) 2013-2014 The Regents of the University of California
9 * Copyright (c) 2013-2015 Google Inc.
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this software and associated documentation files (the "Software"), to deal
13 * in the Software without restriction, including without limitation the rights
14 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the Software is
16 * furnished to do so, subject to the following conditions:
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/* Block-allocation tunables (fragment of an enum; the enum header is outside
 * this view). */
44 /* Note that Hdrspc is only available via padblock (to the 'left' of the rp). */
46 	Hdrspc = 128,	/* leave room for high-level headers */
47 	Bdead = 0x51494F42,	/* "QIOB" */
48 	BLOCKALIGN = 32,	/* was the old BY2V in inferno, which was 8 */
52 * allocate blocks (round data base address to 64 bit boundary).
53 * if mallocz gives us more than we asked for, leave room at the front
/* Allocates a block with @size bytes of payload space plus Hdrspc of header
 * slack; @mem_flags is passed through to kmalloc. Returns the new block.
 * NOTE(review): interior lines of this function are not visible in this
 * excerpt (declarations of b/addr, error handling, wp/rp setup, return). */
56 struct block *block_alloc(size_t size, int mem_flags)
62 	/* If Hdrspc is not block aligned it will cause issues. */
63 	static_assert(Hdrspc % BLOCKALIGN == 0);
	/* Over-allocate by BLOCKALIGN - 1 so the data base can be rounded up to
	 * a BLOCKALIGN boundary below without running past the allocation. */
65 	b = kmalloc(sizeof(struct block) + size + Hdrspc + (BLOCKALIGN - 1),
	/* Data area starts at the first aligned address past the block header. */
79 	addr = ROUNDUP(addr + sizeof(struct block), BLOCKALIGN);
80 	b->base = (uint8_t *) addr;
81 	/* TODO: support this */
82 	/* interesting. We can ask the allocator, after allocating,
83 	 * the *real* size of the block we got. Very nice.
85 	b->lim = ((uint8_t*)b) + msize(b);
86 	 * See use of n in commented code below
	/* Without msize() support, lim is just the end of what we asked for. */
89 	((uint8_t *) b) + sizeof(struct block) + size + Hdrspc + (BLOCKALIGN -
92 	/* TODO: support this */
93 	/* n is supposed to be Hdrspc + rear padding + extra reserved memory, but
94 	 * since we don't currently support checking how much memory was actually
95 	 * reserved, this is always Hdrspc + rear padding. After rounding that down
96 	 * to BLOCKALIGN, it's always Hdrspc since the padding is < BLOCKALIGN.
97 	n = b->lim - b->base - size;
	/* Advance rp by the aligned slack so prepends (padblock) have room. */
98 	b->rp += n & ~(BLOCKALIGN - 1);
102 	/* b->base is aligned, rounded up from b
103 	 * b->lim is the upper bound on our malloc
104 	 * b->rp is advanced by some aligned amount, based on how much extra we
105 	 * received from kmalloc and the Hdrspc. */
109 /* Makes sure b has nr_bufs extra_data. Will grow, but not shrink, an existing
110 * extra_data array. When growing, it'll copy over the old entries. All new
111 * entries will be zeroed. mem_flags determines if we'll block on kmallocs.
113 * Return 0 on success or -1 on error.
114 * Caller is responsible for concurrent access to the block's metadata. */
int block_add_extd(struct block *b, unsigned int nr_bufs, int mem_flags)
117 	unsigned int old_nr_bufs = b->nr_extra_bufs;
118 	size_t old_amt = sizeof(struct extra_bdata) * old_nr_bufs;
119 	size_t new_amt = sizeof(struct extra_bdata) * nr_bufs;
	/* Already big enough; grow-only semantics mean nothing to do. */
122 	if (old_nr_bufs >= nr_bufs)
	/* Grow the existing array; krealloc preserves the old entries. */
125 	new_bdata = krealloc(b->extra_data, new_amt, mem_flags);
	/* Zero only the newly added tail. NOTE(review): old_amt/new_amt are
	 * byte counts, so this assumes new_bdata is a byte-addressed pointer
	 * (e.g. void * with GNU arithmetic) — its declaration is not visible
	 * in this excerpt; confirm. */
128 	memset(new_bdata + old_amt, 0, new_amt - old_amt);
	/* No prior array: allocate a fresh, zeroed one. */
130 	new_bdata = kzmalloc(new_amt, mem_flags);
134 	b->extra_data = new_bdata;
135 	b->nr_extra_bufs = nr_bufs;
139 /* Go backwards from the end of the list, remember the last unused slot, and
140 * stop when a used slot is encountered.
 * Returns the lowest-indexed unused slot after the last used one, or NULL if
 * every slot (or the array itself) is in use/absent. */
141 static struct extra_bdata *next_unused_slot(struct block *b)
143 	struct extra_bdata *ebd = NULL;
145 	for (int i = b->nr_extra_bufs - 1; i >= 0; i--) {
		/* A non-zero base marks the slot as in use; scanning stops at
		 * the first used slot (loop exit not visible in this excerpt). */
146 		if (b->extra_data[i].base)
148 		ebd = &b->extra_data[i];
153 /* Append an extra data buffer @base with offset @off of length @len to block
154 * @b. Reuse an unused extra data slot if there's any.
155 * Return 0 on success or -1 on error. */
int block_append_extra(struct block *b, uintptr_t base, uint32_t off,
157 		       uint32_t len, int mem_flags)
159 	unsigned int nr_bufs = b->nr_extra_bufs + 1;
160 	struct extra_bdata *ebd;
	/* Prefer recycling an existing empty slot before growing the array. */
162 	ebd = next_unused_slot(b);
	/* No free slot: grow the extra_data array by one, then retry. */
164 	if (block_add_extd(b, nr_bufs, mem_flags) != 0)
166 	ebd = next_unused_slot(b);
	/* Account the new buffer's bytes in the block's total extra length.
	 * NOTE(review): the assignments of ebd->base/off/len are in lines not
	 * visible in this excerpt. */
172 	b->extra_len += ebd->len;
/* Releases all of @b's extra data buffers and the extra_data array itself,
 * leaving the block with no extra data. Does not free the block. */
176 void free_block_extra(struct block *b)
178 	struct extra_bdata *ebd;
180 	/* assuming our release method is kfree, which will change when we support
182 	for (int i = 0; i < b->nr_extra_bufs; i++) {
183 		ebd = &b->extra_data[i];
		/* base is a uintptr_t in the slot; cast back for kfree. */
185 		kfree((void*)ebd->base);
188 	b->nr_extra_bufs = 0;
189 	kfree(b->extra_data);	/* harmless if it is 0 */
190 	b->extra_data = 0;	/* in case the block is reused by a free override */
193 /* Frees a block, returning its size (len, not alloc).
 * NOTE(review): most of this function's body is not visible in this excerpt. */
194 size_t freeb(struct block *b)
	/* Sentinel written into freed-block fields so checkb() can detect
	 * use-after-free (see the Bdead constant). */
196 	void *dead = (void *)Bdead;
204 	 * drivers which perform non cache coherent DMA manage their own buffer
205 	 * pool of uncached buffers and provide their own free routine.
211 	/* poison the block in case someone is still holding onto it */
221 /* Free a list of blocks, returning their total size.
 * NOTE(review): loop body (saving b->next, calling freeb, summing) is in
 * lines not visible in this excerpt. */
222 size_t freeblist(struct block *b)
227 	for (; b != 0; b = next) {
/* Sanity-checks block @b, panicking with @msg on any inconsistency: dead
 * (freed) pointers, out-of-order base/rp/wp/lim pointers, missing extra_data,
 * malformed extra_bdata slots, or an extra_len that disagrees with the sum of
 * the per-slot lengths. */
235 void checkb(struct block *b, char *msg)
	/* Matches the poison value freeb() writes into released blocks. */
237 	void *dead = (void *)Bdead;
238 	struct extra_bdata *ebd;
239 	size_t extra_len = 0;
242 		panic("checkb b %s 0x%lx", msg, b);
	/* Any field equal to the poison value means a freed block is in use. */
243 	if (b->base == dead || b->lim == dead || b->next == dead
244 	    || b->rp == dead || b->wp == dead) {
245 		printd("checkb: base 0x%8.8lx lim 0x%8.8lx next 0x%8.8lx\n",
246 		       b->base, b->lim, b->next);
247 		printd("checkb: rp 0x%8.8lx wp 0x%8.8lx\n", b->rp, b->wp);
248 		panic("checkb dead: %s\n", msg);
	/* Pointer ordering invariant: base <= rp <= wp <= lim. */
251 	if (b->base > b->lim)
252 		panic("checkb 0 %s 0x%lx 0x%lx", msg, b->base, b->lim);
254 		panic("checkb 1 %s 0x%lx 0x%lx", msg, b->base, b->rp);
256 		panic("checkb 2 %s 0x%lx 0x%lx", msg, b->base, b->wp);
258 		panic("checkb 3 %s 0x%lx 0x%lx", msg, b->rp, b->lim);
260 		panic("checkb 4 %s 0x%lx 0x%lx", msg, b->wp, b->lim);
261 	if (b->nr_extra_bufs && !b->extra_data)
262 		panic("checkb 5 %s missing extra_data", msg);
264 	for (int i = 0; i < b->nr_extra_bufs; i++) {
265 		ebd = &b->extra_data[i];
		/* An unused slot (base == 0) must be fully zeroed. */
266 		if (!ebd->base && (ebd->off || ebd->len))
267 			panic("checkb %s: ebd %d has no base, but has off %d and len %d",
268 			      msg, i, ebd->off, ebd->len);
		/* Every used buffer must still hold a kmalloc reference. */
270 		if (!kmalloc_refcnt((void*)ebd->base))
271 			panic("checkb %s: buf %d, base %p has no refcnt!\n", msg, i,
273 		extra_len += ebd->len;
	/* Cached extra_len must equal the sum of the per-slot lengths. */
276 	if (extra_len != b->extra_len)
277 		panic("checkb %s: block extra_len %d differs from sum of ebd len %d",
278 		      msg, b->extra_len, extra_len);
281 void printblock(struct block *b)
284 unsigned int off, elen;
285 struct extra_bdata *e;
288 printk("block is null\n");
292 printk("block of BLEN = %d, with %d header and %d data in %d extras\n",
293 BLEN(b), BHLEN(b), b->extra_len, b->nr_extra_bufs);
298 for (c = b->rp; c < b->wp; c++) {
299 printk(" %02x", *c & 0xff);
303 printk("%2x:\t", off);
308 for (int i = 0; (i < b->nr_extra_bufs) && elen; i++) {
309 e = &b->extra_data[i];
313 printk("data %d:\n", i);
315 for (off = 0; off < e->len; off++) {
316 c = (unsigned char *)e->base + e->off + off;
317 printk(" %02x", *c & 0xff);
318 if ((off + 1) % 8 == 0 && off +1 < e->len) {
320 printk("%2x:\t", off + 1);