/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdlib.h>

#include <apr_pools.h>

#include "serf.h"
#include "serf_bucket_util.h"
24 typedef struct node_header_t {
27 struct node_header_t *next; /* if size == 0 (freed/inactive) */
28 /* no data if size == STANDARD_NODE_SIZE */
29 apr_memnode_t *memnode; /* if size > STANDARD_NODE_SIZE */
/* The size of a node_header_t, properly aligned. Note that (normally)
 * this macro will round the size to a multiple of 8 bytes. Keep this in
 * mind when altering the node_header_t structure. Also, keep in mind that
 * node_header_t is an overhead for every allocation performed through
 * the serf_bucket_mem_alloc() function.
 */
#define SIZEOF_NODE_HEADER_T  APR_ALIGN_DEFAULT(sizeof(node_header_t))

/* STANDARD_NODE_SIZE is manually set to an allocation size that will
 * capture most allocators performed via this API. It must be "large
 * enough" to avoid lots of spillage to allocating directly from the
 * apr_allocator associated with the bucket allocator. The apr_allocator
 * has a minimum size of 8k, which can be expensive if you missed the
 * STANDARD_NODE_SIZE by just a few bytes.
 */
/* ### we should define some rules or ways to determine how to derive
 * ### a "good" value for this. probably log some stats on allocs, then
 * ### analyze them for size "misses". then find the balance point between
 * ### wasted space due to min-size allocator, and wasted-space due to
 * ### size-spill to the 8k minimum.
 */
#define STANDARD_NODE_SIZE 128

/* When allocating a block of memory from the allocator, we should go for
 * an 8k block, minus the overhead that the allocator needs.
 */
#define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)

/* Define DEBUG_DOUBLE_FREE if you're interested in debugging double-free
 * calls to serf_bucket_mem_free().
 */
#define DEBUG_DOUBLE_FREE
69 const serf_bucket_t *bucket;
73 #define TRACK_BUCKET_COUNT 100 /* track N buckets' status */
76 int next_index; /* info[] is a ring. next bucket goes at this idx. */
79 read_status_t info[TRACK_BUCKET_COUNT];
83 struct serf_bucket_alloc_t {
85 apr_allocator_t *allocator;
87 serf_unfreed_func_t unfreed;
90 apr_uint32_t num_alloc;
92 node_header_t *freelist; /* free STANDARD_NODE_SIZE blocks */
93 apr_memnode_t *blocks; /* blocks we allocated for subdividing */
98 /* ==================================================================== */
101 static apr_status_t allocator_cleanup(void *data)
103 serf_bucket_alloc_t *allocator = data;
105 /* If we allocated anything, give it back. */
106 if (allocator->blocks) {
107 apr_allocator_free(allocator->allocator, allocator->blocks);
113 SERF_DECLARE(serf_bucket_alloc_t *) serf_bucket_allocator_create(
115 serf_unfreed_func_t unfreed,
118 serf_bucket_alloc_t *allocator = apr_pcalloc(pool, sizeof(*allocator));
120 allocator->pool = pool;
121 allocator->allocator = apr_pool_allocator_get(pool);
122 allocator->unfreed = unfreed;
123 allocator->unfreed_baton = unfreed_baton;
125 #ifdef SERF_DEBUG_BUCKET_USE
127 track_state_t *track;
129 track = allocator->track = apr_palloc(pool, sizeof(*allocator->track));
130 track->next_index = 0;
135 /* ### this implies buckets cannot cross a fork/exec. desirable?
137 * ### hmm. it probably also means that buckets cannot be AROUND
138 * ### during a fork/exec. the new process will try to clean them
139 * ### up and figure out there are unfreed blocks...
141 apr_pool_cleanup_register(pool, allocator,
142 allocator_cleanup, allocator_cleanup);
147 SERF_DECLARE(apr_pool_t *) serf_bucket_allocator_get_pool(
148 const serf_bucket_alloc_t *allocator)
150 return allocator->pool;
153 SERF_DECLARE(void *) serf_bucket_mem_alloc(
154 serf_bucket_alloc_t *allocator,
159 ++allocator->num_alloc;
161 size += SIZEOF_NODE_HEADER_T;
162 if (size <= STANDARD_NODE_SIZE) {
163 if (allocator->freelist) {
164 /* just pull a node off our freelist */
165 node = allocator->freelist;
166 allocator->freelist = node->u.next;
167 #ifdef DEBUG_DOUBLE_FREE
168 /* When we free an item, we set its size to zero. Thus, when
169 * we return it to the caller, we must ensure the size is set
172 node->size = STANDARD_NODE_SIZE;
176 apr_memnode_t *active = allocator->blocks;
179 || active->first_avail + STANDARD_NODE_SIZE >= active->endp) {
180 apr_memnode_t *head = allocator->blocks;
182 /* ran out of room. grab another block. */
183 active = apr_allocator_alloc(allocator->allocator, ALLOC_AMT);
185 /* link the block into our tracking list */
186 allocator->blocks = active;
190 node = (node_header_t *)active->first_avail;
191 node->size = STANDARD_NODE_SIZE;
192 active->first_avail += STANDARD_NODE_SIZE;
196 apr_memnode_t *memnode = apr_allocator_alloc(allocator->allocator,
199 node = (node_header_t *)memnode->first_avail;
200 node->u.memnode = memnode;
204 return ((char *)node) + SIZEOF_NODE_HEADER_T;
207 SERF_DECLARE(void) serf_bucket_mem_free(
208 serf_bucket_alloc_t *allocator,
213 --allocator->num_alloc;
215 node = (node_header_t *)((char *)block - SIZEOF_NODE_HEADER_T);
217 if (node->size == STANDARD_NODE_SIZE) {
218 /* put the node onto our free list */
219 node->u.next = allocator->freelist;
220 allocator->freelist = node;
222 #ifdef DEBUG_DOUBLE_FREE
223 /* note that this thing was freed. */
226 else if (node->size == 0) {
227 /* damn thing was freed already. */
232 #ifdef DEBUG_DOUBLE_FREE
233 /* note that this thing was freed. */
238 apr_allocator_free(allocator->allocator, node->u.memnode);
243 /* ==================================================================== */
246 #ifdef SERF_DEBUG_BUCKET_USE
248 static read_status_t *find_read_status(
249 track_state_t *track,
250 const serf_bucket_t *bucket,
255 if (track->num_used) {
256 int count = track->num_used;
257 int idx = track->next_index;
259 /* Search backwards. In all likelihood, the bucket which just got
260 * read was read very recently.
262 while (count-- > 0) {
264 /* assert: track->num_used == TRACK_BUCKET_COUNT */
265 idx = track->num_used - 1;
267 if ((rs = &track->info[idx])->bucket == bucket) {
273 /* Only create a new read_status_t when asked. */
277 if (track->num_used < TRACK_BUCKET_COUNT) {
278 /* We're still filling up the ring. */
282 rs = &track->info[track->next_index];
284 rs->last = APR_SUCCESS; /* ### the right initial value? */
286 if (++track->next_index == TRACK_BUCKET_COUNT)
287 track->next_index = 0;
292 #endif /* SERF_DEBUG_BUCKET_USE */
295 SERF_DECLARE(apr_status_t) serf_debug__record_read(
296 const serf_bucket_t *bucket,
299 #ifndef SERF_DEBUG_BUCKET_USE
303 track_state_t *track = bucket->allocator->track;
304 read_status_t *rs = find_read_status(track, bucket, 1);
306 /* Validate that the previous status value allowed for another read. */
307 if (APR_STATUS_IS_EAGAIN(rs->last) /* ### or APR_EOF? */) {
308 /* Somebody read when they weren't supposed to. Bail. */
312 /* Save the current status for later. */
319 SERF_DECLARE(void) serf_debug__entered_loop(serf_bucket_alloc_t *allocator)
321 #ifdef SERF_DEBUG_BUCKET_USE
323 track_state_t *track = allocator->track;
324 read_status_t *rs = &track->info[0];
326 for ( ; track->num_used; --track->num_used, ++rs ) {
327 if (rs->last == APR_SUCCESS) {
328 /* Somebody should have read this bucket again. */
332 /* ### other status values? */
335 /* num_used was reset. also need to reset the next index. */
336 track->next_index = 0;
341 SERF_DECLARE(void) serf_debug__closed_conn(serf_bucket_alloc_t *allocator)
343 #ifdef SERF_DEBUG_BUCKET_USE
345 /* Just reset the number used so that we don't examine the info[] */
346 allocator->track->num_used = 0;
347 allocator->track->next_index = 0;
352 SERF_DECLARE(void) serf_debug__bucket_destroy(const serf_bucket_t *bucket)
354 #ifdef SERF_DEBUG_BUCKET_USE
356 track_state_t *track = bucket->allocator->track;
357 read_status_t *rs = find_read_status(track, bucket, 0);
359 if (rs != NULL && rs->last != APR_EOF) {
360 /* The bucket was destroyed before it was read to completion. */
362 /* Special exception for socket buckets. If a connection remains
363 * open, they are not read to completion.
365 if (SERF_BUCKET_IS_SOCKET(bucket))
368 /* Ditto for SSL Decrypt buckets. */
369 if (SERF_BUCKET_IS_SSL_DECRYPT(bucket))
372 /* Ditto for SSL Encrypt buckets. */
373 if (SERF_BUCKET_IS_SSL_ENCRYPT(bucket))
376 /* Ditto for barrier buckets. */
377 if (SERF_BUCKET_IS_BARRIER(bucket))
387 SERF_DECLARE(void) serf_debug__bucket_alloc_check(
388 serf_bucket_alloc_t *allocator)
390 #ifdef SERF_DEBUG_BUCKET_USE
391 if (allocator->num_alloc != 0) {