/* alloc_pool.c - routines for allocating memory from tagged pools */

/* Copyright 1991 Mark Russell, University of Kent at Canterbury.
 *
 * You can do what you like with this source code as long as
 * you don't try to make money out of it and you include an
 * unaltered copy of this message (including the copyright).
 */

char ukcprog_alloc_sccsid[] = "$Id: alloc.c,v 1.10 1993/10/25 11:44:23 mtr Exp $ UKC";

#include <stdio.h>      /* for NULL - grrr */
#include <stdlib.h>
#include <string.h>
#ifndef __STDC__
#include <memory.h>
#endif

#include "ukcprog.h"

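/* Typical usage (a sketch only - the alloc_pool routines are real, but
 * the other names here are invented for illustration):
 *
 *      alloc_pool_t *ap = alloc_create_pool();
 *      widget_t *w = (widget_t *)alloc(ap, sizeof(widget_t));
 *      char *name = alloc_strdup(ap, "example");
 *      ...
 *      alloc_free_pool(ap);    -- gives back w, name and everything
 *                                 else allocated from ap
 */
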
/* This is a conservative guess at the per-request malloc overhead in
 * bytes.  Nothing breaks if this is wrong.
 */
#define MALLOC_OVERHEAD 24

/* When we run out of space in an alloc pool we add another block.
 * We add small blocks (SBLOCKSIZE bytes each) for the first NSMALLBLOCKS
 * requests, then switch to large (BLOCKSIZE) ones.
 *
 * The intention is that we don't gobble large amounts of memory for
 * a small alloc pool, but that we are reasonably efficient for
 * one that's continually growing.
 *
 * Currently, we go slowly (256 bytes a go) for the first 8K, then
 * fast (4K a go).
 */
#define NSMALLBLOCKS 32

/* Size of the first block for an alloc pool (requested when the alloc
 * pool is created) and for the subsequent NSMALLBLOCKS blocks.
 */
#define SBLOCKSIZE (256 - sizeof(alloc_pool_t) - MALLOC_OVERHEAD)

/* Size of the block requested for an alloc pool after the first
 * NSMALLBLOCKS block additions.
 *
 * Try to make the malloc request size a bit less than a power of two
 * to compensate for brain-damaged mallocs that add overhead then round
 * up to a power of two.
 */
#define BLOCKSIZE (4096 - sizeof(ablock_t) - MALLOC_OVERHEAD)

/* Maximum alignment requirements for all types *including* float and double.
 */
#define ALIGN sizeof(double)

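/* Header for a single block of pool memory.  The union with a double
 * forces the header to be aligned strictly enough that the allocation
 * buffer, which starts immediately after the header (see push_ablock),
 * is suitably aligned for any type.
 */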
typedef struct ablockst {
        union {
                double align;
                struct ablock {
                        char *abu_buf;
                        char *abu_pos;
                        char *abu_end;
                        size_t abu_size;
                        struct ablockst *abu_next;
                } a;
        } u;
} ablock_t;

#define ab_buf  u.a.abu_buf
#define ab_pos  u.a.abu_pos
#define ab_end  u.a.abu_end
#define ab_size u.a.abu_size
#define ab_next u.a.abu_next

struct alloc_pool_s {
        ablock_t *ap_ablock;
        ablock_t *ap_freelist;
        int ap_nblocks;
        bool ap_debug;
        ablock_t ap_first_ablock;
};

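/* State saved by alloc_mark() so that alloc_release() can later wind
 * the pool back to the position it had when the mark was taken.
 */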
struct alloc_mark_s {
        alloc_pool_t *am_apool;
        ablock_t *am_ablock;
        char *am_pos;
        char *am_end;
};

static ablock_t *push_ablock PROTO((alloc_pool_t *ap, ablock_t *ab, unsigned size));
static ablock_t *find_ab PROTO((alloc_pool_t *ap, unsigned size));
static void reset_ablocks PROTO((alloc_pool_t *ap, ablock_t *limab));

/* The default debug flag for a new alloc_pool.  When the debug flag
 * is TRUE, we initialise memory to garbage, and set it to (different)
 * garbage when alloc_free_pool is called.
 */
static bool Default_debug_flag = TRUE;

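/* Set the debug flag that newly created alloc pools will inherit.
 * Returns the previous setting.
 */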
bool
alloc_set_default_debug_flag(val)
bool val;
{
        bool oldval;

        oldval = Default_debug_flag;
        Default_debug_flag = val;
        return oldval;
}

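/* Set the debug flag of an individual alloc pool.  Returns the
 * previous setting.
 */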
bool
alloc_set_debug_flag(ap, val)
alloc_pool_t *ap;
bool val;
{
        bool oldval;

        oldval = ap->ap_debug;
        ap->ap_debug = val;
        return oldval;
}

/* Make a new alloc_pool().  We make an initial allocation of a small
 * amount of memory, to make small alloc pool creation cheap (one malloc).
 */
alloc_pool_t *
alloc_create_pool()
{
        alloc_pool_t *ap;

        ap = (alloc_pool_t *)e_malloc(sizeof(alloc_pool_t) + SBLOCKSIZE);
        ap->ap_ablock = NULL;
        ap->ap_freelist = NULL;
        ap->ap_nblocks = 0;
        ap->ap_debug = Default_debug_flag;
        push_ablock(ap, &ap->ap_first_ablock, SBLOCKSIZE);

        return ap;
}

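/* Move all the ablocks ahead of limab on the pool's chain onto the
 * pool's free list, resetting each one to empty.  If the debug flag is
 * set, the reclaimed memory is overwritten with garbage first.
 */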
static void
reset_ablocks(ap, limab)
alloc_pool_t *ap;
ablock_t *limab;
{
        ablock_t *ab, *next;
        bool debug;

        debug = ap->ap_debug;
        for (ab = ap->ap_ablock; ab != limab; ab = next) {
                next = ab->ab_next;
                if (debug)
                        memset(ab->ab_buf, 0x42, ab->ab_size);
                ab->ab_pos = ab->ab_buf;
                ab->ab_end = ab->ab_pos + ab->ab_size;
                ab->ab_next = ap->ap_freelist;
                ap->ap_freelist = ab;
        }
}

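/* Reset an alloc pool to its initial (empty) state.  Memory already
 * obtained from malloc is not given back - the pool's extra blocks are
 * put on its free list for reuse.
 */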
void
alloc_reset_pool(ap)
alloc_pool_t *ap;
{
        ablock_t *ab;

        ab = &ap->ap_first_ablock;

        reset_ablocks(ap, ab);

        if (ap->ap_debug)
                memset(ab->ab_buf, 0x42, ab->ab_size);
        ab->ab_pos = ab->ab_buf;
        ab->ab_end = ab->ab_pos + ab->ab_size;

        ap->ap_ablock = ab;
}

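/* Destroy an alloc pool, giving all its malloc'ed blocks back to
 * free().  All memory that was allocated from the pool becomes invalid.
 */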
void
alloc_free_pool(ap)
alloc_pool_t *ap;
{
        ablock_t *ab, *next;
        bool debug;

        debug = ap->ap_debug;

        /* The odd form of the loop here is because we want to overwrite
         * all blocks with garbage (if debug is set), but we don't want
         * to free the last block in the chain, which is allocated as part
         * of the header block.
         */
        ab = ap->ap_ablock;
        for (;;) {
                next = ab->ab_next;
                if (debug)
                        memset(ab->ab_buf, 0x42, ab->ab_size);
                if (next == NULL)
                        break;
                free((char *)ab);
                ab = next;
        }

        free((char *)ap);
}

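/* Initialise the ablock header at ab, whose buffer is the size bytes
 * immediately following the header, and push it onto the front of the
 * pool's chain of blocks.
 */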
static ablock_t *
push_ablock(ap, ab, size)
alloc_pool_t *ap;
ablock_t *ab;
unsigned size;
{
        ab->ab_buf = ab->ab_pos = (char *)&ab[1];
        ab->ab_end = ab->ab_buf + size;
        ab->ab_size = size;
        ab->ab_next = ap->ap_ablock;
        ap->ap_ablock = ab;

        if (ap->ap_debug)
                memset(ab->ab_buf, 0x53, (size_t)size);

        return ab;
}

/* Find an ablock with at least size bytes free.  If the block at the
 * head of the free list is big enough, use that.  Otherwise malloc
 * a new ablock and push it on the chain.
 */
static ablock_t *
find_ab(ap, size)
alloc_pool_t *ap;
unsigned size;
{
        ablock_t *ab;

        if (ap->ap_freelist != NULL && ap->ap_freelist->ab_size >= size) {
                ab = ap->ap_freelist;
                ap->ap_freelist = ap->ap_freelist->ab_next;
                ab->ab_next = ap->ap_ablock;
                ap->ap_ablock = ab;
        }
        else {
                voidptr buf;
                unsigned blocksize;

                blocksize = (ap->ap_nblocks < NSMALLBLOCKS) ? SBLOCKSIZE : BLOCKSIZE;
                if (size < blocksize)
                        size = blocksize;
                if ((buf = malloc((size_t)(sizeof(ablock_t) + size))) == NULL)
                        return NULL;
                ab = push_ablock(ap, (ablock_t *)buf, size);
                ++ap->ap_nblocks;
        }
        return ab;
}

/* Allocate nbytes from alloc pool ap.  This interface never
 * returns NULL - if memory runs out we panic.
 */
voidptr
alloc(ap, nbytes)
alloc_pool_t *ap;
size_t nbytes;
{
        ablock_t *ab;
        int over;
        char *ptr;

        over = nbytes % ALIGN;
        if (over != 0)
                nbytes += ALIGN - over;

        ab = ap->ap_ablock;

        if (nbytes > ab->ab_end - ab->ab_pos) {
                ab = find_ab(ap, (unsigned)nbytes);
                if (ab == NULL)
                        panic("out of memory in alloc");
        }

        ptr = ab->ab_pos;
        ab->ab_pos += nbytes;

        return ptr;
}

/* Like alloc, but return NULL if we can't satisfy the request.
 */
voidptr
alloc_ck(ap, nbytes)
alloc_pool_t *ap;
size_t nbytes;
{
        ablock_t *ab;
        int over;
        char *ptr;

        over = nbytes % ALIGN;
        if (over != 0)
                nbytes += ALIGN - over;

        ab = ap->ap_ablock;

        if (nbytes > ab->ab_end - ab->ab_pos) {
                ab = find_ab(ap, nbytes);
                if (ab == NULL)
                        return NULL;
        }

        ptr = ab->ab_pos;
        ab->ab_pos += nbytes;

        return ptr;
}

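/* Record the current allocation position of pool ap, so that everything
 * allocated after this call can be thrown away in one go with
 * alloc_release().  The position is captured before the mark itself is
 * allocated, so releasing the mark reclaims its own storage too.
 *
 * A sketch of the intended pattern (variable names invented for
 * illustration):
 *
 *      alloc_mark_t *mark = alloc_mark(ap);
 *      ... allocate freely from ap ...
 *      alloc_release(ap, mark);   -- everything since the mark is gone
 */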
alloc_mark_t *
alloc_mark(ap)
alloc_pool_t *ap;
{
        alloc_mark_t *am;
        ablock_t *save_ab;
        char *save_pos, *save_end;

        save_ab = ap->ap_ablock;
        save_pos = save_ab->ab_pos;
        save_end = save_ab->ab_end;

        am = (alloc_mark_t *)alloc(ap, sizeof(alloc_mark_t));
        am->am_apool = ap;
        am->am_ablock = save_ab;
        am->am_pos = save_pos;
        am->am_end = save_end;

        return am;
}

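/* Release all memory allocated from pool ap since the mark am was
 * taken (including the mark itself).  The mark must have come from
 * alloc_mark() on the same pool.
 */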
void
alloc_release(ap, am)
alloc_pool_t *ap;
alloc_mark_t *am;
{
        ablock_t *ab;
        alloc_mark_t mark;

        if (am->am_apool != ap)
                panic("id botch in ar");

        /* If debug is set, we are about to step on the store that
         * the mark was allocated from, so save it.
         */
        mark = *am;
        ab = mark.am_ablock;

        reset_ablocks(ap, ab);

        if (ap->ap_debug) {
                memset(mark.am_pos, 0x42, (size_t)(ab->ab_pos - mark.am_pos));
                memset(ab->ab_end, 0x42, (size_t)(mark.am_end - ab->ab_end));
        }
        else {
                /* Make sure the application can't use this mark again.
                 */
                am->am_apool = NULL;
        }

        ab->ab_pos = mark.am_pos;
        ab->ab_end = mark.am_end;
        ap->ap_ablock = ab;
}

/* Like alloc(), except that the result is assumed not to need alignment.
 * We allocate from the opposite end of the block from alloc(), so
 * hopefully all the string requests will be packed together with no
 * alignment holes.
 *
 * We never return NULL - if we can't fulfill the request we panic.
 */
char *
allocstr(ap, nbytes)
alloc_pool_t *ap;
size_t nbytes;
{
        ablock_t *ab;

        ab = ap->ap_ablock;

        if (nbytes > ab->ab_end - ab->ab_pos) {
                ab = find_ab(ap, (unsigned)nbytes);
                if (ab == NULL)
                        panic("out of memory in allocstr");
        }

        return ab->ab_end -= nbytes;
}

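/* Like allocstr(), but return NULL if we can't satisfy the request.
 */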
char *
allocstr_ck(ap, nbytes)
alloc_pool_t *ap;
size_t nbytes;
{
        ablock_t *ab;

        ab = ap->ap_ablock;

        /* We cast nbytes to unsigned to catch negative values: they
         * turn into huge positive values, for which the malloc() in
         * find_ab() fails and we return NULL.
         */
        if ((unsigned)nbytes > ab->ab_end - ab->ab_pos) {
                ab = find_ab(ap, (unsigned)nbytes);
                if (ab == NULL)
                        return NULL;
        }

        return ab->ab_end -= nbytes;
}

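/* Return a copy of the NUL-terminated string s, allocated from pool ap.
 */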
char *
alloc_strdup(ap, s)
alloc_pool_t *ap;
const char *s;
{
        size_t nbytes;

        nbytes = strlen(s) + 1;
        return memcpy(allocstr(ap, nbytes), s, nbytes);
}