/*
 * Copyright (c) 1999-2001
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Standard-library dependencies (assumed; in the original build these
   may be provided by the file that #includes this one): */
#include <assert.h>
#include <limits.h>
#include <stdlib.h>

typedef __rcintptr pageid;

/* Two alternative encodings for the id of a free page; only the second
   (a pointer to the distinguished zeroregion) is active. */
#if 0
#define FREEPAGE ((region)-1) /* Id of a free page */
#else
#define FREEPAGE (&zeroregion)
#endif

/* Debug-only page/region consistency assertions; the NMEMDEBUG and
   DUPLICATES guards are reconstructed from the empty vs. checking
   variants of the macros. */
#ifdef NMEMDEBUG
#define ASSERT_FREE(p)
#define ASSERT_INUSE(p, r)
#else
#define ASSERT_FREE(p) assert(regionof(p) == FREEPAGE)
#ifdef DUPLICATES
#define ASSERT_INUSE(p, r) assert(regionof(p) == r->base)
#else
#define ASSERT_INUSE(p, r) assert(regionof(p) == r)
#endif
#endif

/* Page allocator for region-based memory management */
/* TBD: special free list for size == K ?? */

#define PAGECOUNTBITS (CHAR_BIT * sizeof(pageid) - 1)

struct page
{
  /* Next page in region or in free list */
  struct page *next;

  /* Doubly linked list of pages sorted by address */
  struct page *next_address, *prev_address;

  /* number of pages in this allocation unit. Negative for free pages. */
  pageid pagecount : PAGECOUNTBITS;

  /* Is this page free ? */
  unsigned int free : 1;

  /* Only in free pages not in the single_pages list */
  struct page *previous;
};
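
/* Layout note: pagecount (PAGECOUNTBITS bits) plus free (1 bit) total
   exactly CHAR_BIT * sizeof(pageid) bits, so the two bitfields pack
   into a single pageid-sized word. */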

/* The pages are kept in a single list sorted by address via the
   next_address and prev_address fields. The first page's prev_address and
   the last page's next_address fields point to pages_byaddress.
   pages_byaddress.next_address is the first page
   pages_byaddress.prev_address is the last page

   This list is used for coalescing operations.
*/
static struct page pages_byaddress;
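
/* Illustration only: given the sentinel invariant above, an empty list
   is pages_byaddress.next_address == &pages_byaddress, and a full
   address-order traversal looks like:

     struct page *scan;
     for (scan = pages_byaddress.next_address;
          scan != &pages_byaddress;
          scan = scan->next_address)
       ...;
*/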

struct page *alloc_single_page(struct page *next);
void free_single_page(region r, struct page *p);

struct page *alloc_pages(int n, struct page *next);
void free_pages(region r, struct page *p);

/* a list of free individual pages */
struct page *single_pages;

/* free pages (not including those in single_pages) */
struct page *unused_pages;

static void init_pages(void)
{
  pages_byaddress.next_address = &pages_byaddress;
  pages_byaddress.prev_address = &pages_byaddress;
}

static void insertbefore_address(struct page *p, struct page *before)
{
  p->prev_address = before->prev_address;
  p->next_address = before;
  before->prev_address = p;
  p->prev_address->next_address = p;
}

static void unlink_address(struct page *p)
{
  p->prev_address->next_address = p->next_address;
  p->next_address->prev_address = p->prev_address;
}

static void addbyaddress(struct page *p)
{
  struct page *address_scan;

  /* Warning: this is slow. Calls to it should not be frequent (once app
     reaches a steady state of memory usage). */

  for (address_scan = pages_byaddress.next_address; ;
       address_scan = address_scan->next_address)
    if (p < address_scan || address_scan == &pages_byaddress)
      {
        insertbefore_address(p, address_scan);
        return;
      }
}

/* Doubly linked page list management */

void addfront(struct page **list, struct page *p)
/* Effects: Adds p to the front of doubly-linked list list */
{
  p->previous = NULL;
  p->next = *list;
  if (*list) (*list)->previous = p;
  *list = p;
}

void unlink_page(struct page **list, struct page *p)
/* Effects: Remove p from its doubly linked list */
{
  if (p->previous)
    p->previous->next = p->next;
  else
    *list = p->next;
  if (p->next)
    p->next->previous = p->previous;
}

void *region_get_mem(size_t s)
{
  void *mem = malloc(s + RPAGESIZE - 1);

  return (void *)ALIGN((__rcintptr)mem, RPAGESIZE);
}
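
/* Worked example of the over-allocation above: if RPAGESIZE is 8192,
   region_get_mem(8192) mallocs 8192 + 8191 bytes; wherever that block
   starts, rounding up to the next 8192-byte boundary still leaves 8192
   usable bytes inside it. Note that the raw malloc pointer is
   discarded, so this memory is never free()d back to the system. */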

/* Page to region map management */
/* ----------------------------- */

RADIX_TREE(__rcregionmap);

static void set_page_region(pageid pagenb, region r)
{
  radix_tree_delete (&__rcregionmap, pagenb);
  radix_tree_insert (&__rcregionmap, pagenb, r);
}

#define page_region(pagenb) (radix_tree_lookup (&__rcregionmap, (pagenb)))
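
/* The regionof() used by the ASSERT_* macros is presumably this lookup
   applied to a page number; a minimal sketch, assuming PAGENB maps an
   address to its page id as it does elsewhere in this file:

     #define regionof(ptr) ((region)page_region(PAGENB(ptr)))
*/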

void set_region(struct page *p, int npages, region r)
{
  pageid pnb = PAGENB(p);

  while (npages-- > 0)
    set_page_region(pnb++, r);
}

/* Mark the memory range from 'from' (inclusive) to 'to' (exclusive)
   as belonging to region r */
void set_region_range(void *from, void *to, region r)
{
  pageid first = PAGENB(from), last = PAGENB((pageid)to - 1);

  while (first <= last)
    set_page_region(first++, r);
}

/* Multi-page allocation management */
/* -------------------------------- */

struct page *alloc_new(int n, struct page *next)
/* Assumes freepages_lock held */
{
  struct page *newp = region_get_mem(n << RPAGELOG);

  if (!newp)
    abort(); /* out of memory; the original presumably invokes an
                out-of-memory handler before aborting */
  assert(!((long)newp & (RPAGESIZE - 1)));

  newp->next = next;
  newp->pagecount = n;
  newp->free = 0;
  addbyaddress(newp);
#ifndef NMEMDEBUG
  {
    pageid i, pnb = PAGENB(newp);

    for (i = pnb; i < pnb + n; i++)
      set_page_region(i, FREEPAGE);
  }
#endif

  return newp;
}

struct page *alloc_split(struct page *split, int n, struct page *next)
/* Assumes freepages_lock held */
{
#ifndef NMEMDEBUG
  /* These pages had better be free */
  pageid i, pnb = PAGENB(split);

  assert(split->pagecount >= n);
  for (i = pnb; i < pnb + split->pagecount; i++)
    assert(page_region(i) == FREEPAGE);
#endif
  if (split->pagecount > n)
    {
      struct page *splitoff = split;

      /* Keep first part of block */
      split->pagecount -= n;
      /* Return latter part of block */
      split = (struct page *)((char *)split + (split->pagecount << RPAGELOG));

      /* Update the by-address list */
      insertbefore_address(split, splitoff->next_address);
    }
  else
    {
      /* remove split from list */
      unlink_page(&unused_pages, split);
    }
  split->next = next;
  split->pagecount = n;
  split->free = 0;

  return split;
}

struct page *alloc_pages(int n, struct page *next)
{
  struct page *best, *scan;
  __rcintptr bestn;

  assert(n >= K);

  scan = unused_pages;
  /* Find first fit */
  for (;;)
    {
      if (!scan)
        return alloc_new(n, next);

      if (scan->pagecount >= n) break;
      scan = scan->next;
    }

  /* Now find best fit */
  best = scan;
  bestn = scan->pagecount;
  for (;;)
    {
      scan = scan->next;
      if (!scan)
        return alloc_split(best, n, next);

      if (scan->pagecount >= n && scan->pagecount < bestn)
        {
          best = scan;
          bestn = scan->pagecount;
        }
    }
}
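
/* The two loops above give "first fit, then best fit" behaviour: the
   first walk stops as soon as some block is big enough (falling through
   to alloc_new if none is), and only then does the second walk look for
   the smallest block that still fits. */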

static void coalesce(struct page *p)
{
  struct page *prev = p->prev_address, *next;

  p->free = 1;

  /* Coalesce with predecessor ? */
  if (prev->free && (char *)prev + (prev->pagecount << RPAGELOG) == (char *)p)
    {
      prev->pagecount += p->pagecount;
      unlink_address(p);
      p = prev;
    }
  else /* No, add to free pages list */
    addfront(&unused_pages, p);

  next = p->next_address;
  /* Coalesce with successor ? */
  if (next->free && (char *)p + (p->pagecount << RPAGELOG) == (char *)next)
    {
      unlink_page(&unused_pages, next);
      p->pagecount += next->pagecount;
      unlink_address(next);
    }
}
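
/* The sentinel pages_byaddress is a static struct page, so its free bit
   is zero-initialized; the prev->free and next->free tests above thus
   always fail at the ends of the list and no explicit sentinel checks
   are needed. */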

void free_pages(region r, struct page *p)
/* Assumes freepages_lock held */
{
#ifndef NMEMDEBUG
  pageid i, pnb = PAGENB(p);

  for (i = pnb; i < pnb + p->pagecount; i++)
    {
      assert(page_region(i) == r);
      set_page_region(i, FREEPAGE);
    }
#endif

  coalesce(p);
}

/* Single page management */
/* ---------------------- */

static int single_page_count;

static void add_single_pages(struct page *base)
/* Effects: Adds pages at base to the single_pages list */
{
  pageid n = base->pagecount;
  struct page *prev = base->prev_address, *basenext = base->next_address,
    *next;

  single_page_count += n;

  for (;;)
    {
      ASSERT_FREE(base);
      base->free = 0; /* Not free so that coalesce won't steal these back */
      base->prev_address = prev;
      prev = base;
      base->next = single_pages;
      single_pages = base;
      if (--n == 0)
        break;
      next = (struct page *)((char *)base + RPAGESIZE);
      base->next_address = next;
      base = next;
    }
  base->next_address = basenext;
  basenext->prev_address = base;
}
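
/* After the loop above, the by-address chain prev <-> page <-> ... <->
   basenext threads through every individual page, so a single page that
   is later freed and coalesced still sees its correct physical
   neighbours. */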

void scavenge_single_pages(int n)
{
  /* Add n pages to the single_pages list */
  struct page *scan, *best;
  __rcintptr bestn;

  /* Take any group in unused_pages that is <= n or < K.
     Remember smallest entry > n too. This is sort of equivalent to
     a best fit where we allow partial allocations to make up a whole */
  best = NULL;
  bestn = (__rcintptr)1 << (sizeof(__rcintptr) * CHAR_BIT - 2);
  scan = unused_pages;
  while (scan)
    {
      /* The pages < K can't be used for anything but single pages so we
         might as well grab them even if they are a little too big */
      if (scan->pagecount <= n || scan->pagecount < K)
        {
          struct page *adding = scan;

          scan = scan->next;
          n -= adding->pagecount;
          unlink_page(&unused_pages, adding);
          add_single_pages(adding);
          if (n <= 0) return;
        }
      else
        {
          if (scan->pagecount < bestn)
            {
              bestn = scan->pagecount;
              best = scan;
            }
          scan = scan->next;
        }
    }

  /* Still not enough. Split the best block if there is one, allocate
     new pages otherwise */
  if (!best)
    add_single_pages(alloc_new(n, NULL));
  else if (best->pagecount - n < K)
    {
      unlink_page(&unused_pages, best);
      add_single_pages(best);
    }
  else
    add_single_pages(alloc_split(best, n, NULL));
}
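
/* Worked example of the partial-allocation idea: with n == 8 and
   unused_pages holding blocks of 3 and 5 pages, both blocks are <= n,
   so both are taken (3, then 5) and the function returns early. With
   blocks of 3 and 20 (and a small K), the 3 is taken, n drops to 5,
   and the 20-page block becomes the best fit and is split 15/5. */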

struct page *alloc_single_page(struct page *next)
{
  struct page *p;

  if (!single_pages)
    scavenge_single_pages(PAGE_GROUP_SIZE);

  ASSERT_FREE(single_pages);
  p = single_pages;
  single_pages = p->next;
  p->next = next;

  single_page_count--;

  return p;
}

void free_single_page(region r, struct page *p)
/* Assumes freepages_lock held */
{
#ifndef NMEMDEBUG
  ASSERT_INUSE(p, r);
  set_page_region(PAGENB(p), FREEPAGE);
#endif

  /* Once free list is big enough just coalesce the pages.
     The actual threshold to use might merit further study (something
     adaptive ? e.g., proportional to allocated single pages) */
  if (single_page_count > PAGE_GROUP_SIZE * 2)
    {
      p->pagecount = 1;
      coalesce(p);
    }
  else
    {
      p->next = single_pages;
      single_pages = p;
      single_page_count++;
    }
}
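
/* Summary of the call pattern visible in this file: a region layer
   obtains pages with alloc_pages()/alloc_single_page(), tags them via
   set_region()/set_region_range(), and on region deletion returns them
   through free_pages()/free_single_page(), which re-tag the pages as
   FREEPAGE and eventually coalesce them by address. */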