monotone

monotone Mtn Source Tree

Root/botan/mem_pool.cpp

1/*************************************************
2* Pooling Allocator Source File *
3* (C) 1999-2005 The Botan Project *
4*************************************************/
5
#include <botan/mem_pool.h>
#include <botan/conf.h>
#include <botan/util.h>
#include <algorithm>
#include <cstring>
10
11namespace Botan {
12
13/*************************************************
14* Pooling_Allocator Constructor *
15*************************************************/
16Pooling_Allocator::Pooling_Allocator(u32bit size) :
17 PREF_SIZE(size ? size : Config::get_u32bit("base/memory_chunk")),
18 ALIGN_TO(16)
19 {
20 if(PREF_SIZE == 0)
21 throw Internal_Error("The base/memory_chunk option is unset");
22 lock = get_mutex();
23 initialized = destroyed = false;
24 defrag_counter = 0;
25 }
26
27/*************************************************
28* Pooling_Allocator Destructor *
29*************************************************/
30Pooling_Allocator::~Pooling_Allocator()
31 {
32 delete lock;
33 if(!initialized)
34 throw Invalid_State("Pooling_Allocator: Was never initialized");
35 if(!destroyed)
36 throw Invalid_State("Pooling_Allocator: Never released memory");
37 }
38
39/*************************************************
40* Allocate some initial buffers *
41*************************************************/
42void Pooling_Allocator::init()
43 {
44 if(PREF_SIZE >= 64 && prealloc_bytes())
45 {
46 u32bit allocated = 0;
47 while(allocated < prealloc_bytes())
48 {
49 void* ptr = 0;
50 try {
51 ptr = alloc_block(PREF_SIZE);
52 allocated += PREF_SIZE;
53 }
54 catch(Exception) {}
55
56 if(!ptr)
57 break;
58
59 real_mem.push_back(Buffer(ptr, PREF_SIZE, false));
60 }
61 }
62
63 initialized = true;
64 }
65
66/*************************************************
67* Free all remaining memory *
68*************************************************/
69void Pooling_Allocator::destroy()
70 {
71 if(!initialized)
72 throw Invalid_State("Pooling_Allocator::destroy(): Never initialized");
73 if(destroyed)
74 throw Invalid_State("Pooling_Allocator::destroy(): Already destroyed");
75
76 destroyed = true;
77 for(u32bit j = 0; j != real_mem.size(); j++)
78 dealloc_block(real_mem[j].buf, real_mem[j].length);
79 }
80
81/*************************************************
82* Buffer Comparison *
83*************************************************/
84bool Pooling_Allocator::is_empty_buffer(const Buffer& block)
85 {
86 return (block.length == 0);
87 }
88
89/*************************************************
90* Return true if these buffers are contiguous *
91*************************************************/
92bool Pooling_Allocator::are_contiguous(const Buffer& a, const Buffer& b)
93 {
94 if((const byte*)a.buf + a.length == (const byte*)b.buf)
95 return true;
96 return false;
97 }
98
99/*************************************************
100* See if two free blocks are from the same block *
101*************************************************/
102bool Pooling_Allocator::same_buffer(Buffer& a, Buffer& b) const
103 {
104 return (find_block(a.buf) == find_block(b.buf));
105 }
106
107/*************************************************
108* Find the block containing this memory *
109*************************************************/
110u32bit Pooling_Allocator::find_block(void* addr) const
111 {
112 for(u32bit j = 0; j != real_mem.size(); j++)
113 {
114 const byte* buf_addr = (const byte*)real_mem[j].buf;
115 if(buf_addr <= (byte*)addr &&
116 (byte*)addr < buf_addr + real_mem[j].length)
117 return j;
118 }
119 throw Internal_Error("Pooling_Allocator::find_block: no buffer found");
120 }
121
122/*************************************************
123* Remove empty buffers from list *
124*************************************************/
125void Pooling_Allocator::remove_empty_buffers(std::vector<Buffer>& list) const
126 {
127 std::vector<Buffer>::iterator empty;
128
129 empty = std::find_if(list.begin(), list.end(), is_empty_buffer);
130 while(empty != list.end())
131 {
132 list.erase(empty);
133 empty = std::find_if(list.begin(), list.end(), is_empty_buffer);
134 }
135 }
136
137/*************************************************
138* Allocation *
139*************************************************/
/*************************************************
* Allocation                                     *
*************************************************/
void* Pooling_Allocator::allocate(u32bit n) const
   {
   // Thrown when neither the free list nor the underlying allocator
   // can satisfy the request
   struct Memory_Exhaustion : public Exception
      {
      Memory_Exhaustion() :
         Exception("Pooling_Allocator: Ran out of memory") {}
      };

   if(n == 0) return 0;
   n = round_up(n, ALIGN_TO);   // every chunk is an ALIGN_TO multiple

   Mutex_Holder holder(lock);   // serialize access to free_list/real_mem

   // Fast path: carve the request out of an existing free chunk
   void* new_buf = find_free_block(n);
   if(new_buf)
      return alloc_hook(new_buf, n);

   // Slow path: acquire a fresh block -- at least PREF_SIZE bytes so
   // small requests get pooled -- add it to the free list, and retry
   Buffer block;
   block.length = ((n > PREF_SIZE) ? n : PREF_SIZE);
   block.buf = get_block(block.length);
   if(!block.buf)
      throw Memory_Exhaustion();
   free_list.push_back(block);
   // free_list is kept sorted: merge the single appended element in
   if(free_list.size() >= 2)
      std::inplace_merge(free_list.begin(), free_list.end() - 1,
                         free_list.end());

   new_buf = find_free_block(n);
   if(new_buf)
      return alloc_hook(new_buf, n);

   // get_block succeeded but the retry still failed -- should not
   // happen, treated as exhaustion
   throw Memory_Exhaustion();
   }
173
174/*************************************************
175* Deallocation *
176*************************************************/
/*************************************************
* Deallocation                                   *
*************************************************/
void Pooling_Allocator::deallocate(void* ptr, u32bit n) const
   {
   // Run the defragmentation / give-back sweep once per this many calls
   const u32bit RUNS_TO_DEFRAGS = 16;

   if(ptr == 0 || n == 0) return;

   n = round_up(n, ALIGN_TO);
   // Wipe returned memory; consistency_check() relies on free chunks
   // being all-zero
   std::memset(ptr, 0, n);

   Mutex_Holder holder(lock);

   dealloc_hook(ptr, n);

   // Re-insert the chunk keeping free_list sorted: append, then merge
   // the single new element into place
   free_list.push_back(Buffer(ptr, n));
   if(free_list.size() >= 2)
      std::inplace_merge(free_list.begin(), free_list.end() - 1,
                         free_list.end());

   defrag_counter = (defrag_counter + 1) % RUNS_TO_DEFRAGS;
   if(defrag_counter == 0)
      {
      // Periodic sweep: any free-list entry that now spans an entire
      // underlying block is a candidate to be released back via
      // free_block() (which applies the keep_free() retention policy)
      for(u32bit j = 0; j != free_list.size(); j++)
         {
         bool erase = false;
         if(free_list[j].buf == 0) continue;   // already released this sweep

         for(u32bit k = 0; k != real_mem.size(); k++)
            if(free_list[j].buf == real_mem[k].buf &&
               free_list[j].length == real_mem[k].length)
               erase = true;

         if(erase)
            {
            const u32bit buf = find_block(free_list[j].buf);
            free_block(real_mem[buf].buf, real_mem[buf].length);
            // Mark the free-list entry dead; defrag_free_list() below
            // removes zero-length entries
            free_list[j].buf = 0;
            free_list[j].length = 0;
            }
         }

      // Drop real_mem entries zeroed by free_block(), then coalesce
      // adjacent free chunks
      remove_empty_buffers(real_mem);
      defrag_free_list();
      }
   }
221
222/*************************************************
223* Handle Allocating New Memory *
224*************************************************/
225void* Pooling_Allocator::get_block(u32bit n) const
226 {
227 for(u32bit j = 0; j != real_mem.size(); j++)
228 if(!real_mem[j].in_use && real_mem[j].length == n)
229 {
230 real_mem[j].in_use = true;
231 return real_mem[j].buf;
232 }
233
234 void* ptr = 0;
235 try {
236 ptr = alloc_block(n);
237 }
238 catch(Exception) {}
239
240 if(ptr)
241 real_mem.push_back(Buffer(ptr, n, true));
242 return ptr;
243 }
244
245/*************************************************
246* Handle Deallocating Memory *
247*************************************************/
/*************************************************
* Handle Deallocating Memory                     *
*************************************************/
void Pooling_Allocator::free_block(void* ptr, u32bit n) const
   {
   if(!ptr) return;

   // Total bytes currently idle (cached but not handed out)
   u32bit free_space = 0;
   for(u32bit j = 0; j != real_mem.size(); j++)
      if(!real_mem[j].in_use)
         free_space += real_mem[j].length;

   // Retention policy: only return memory to the system once the idle
   // pool exceeds the keep_free() threshold; otherwise keep the block
   // cached for future get_block() calls
   bool free_this_block = false;
   if(free_space > keep_free())
      free_this_block = true;

   for(u32bit j = 0; j != real_mem.size(); j++)
      if(real_mem[j].buf == ptr)
         {
         // The block must be checked out and the size must match the
         // original allocation exactly
         if(!real_mem[j].in_use || real_mem[j].length != n)
            throw Internal_Error("Pooling_Allocator: Size mismatch in free");

         if(free_this_block)
            {
            dealloc_block(real_mem[j].buf, real_mem[j].length);
            // Zero the entry; remove_empty_buffers() drops it later
            real_mem[j].buf = 0;
            real_mem[j].length = 0;
            }
         else
            real_mem[j].in_use = false;   // keep cached for reuse

         return;
         }

   throw Internal_Error("Pooling_Allocator: Unknown pointer was freed");
   }
281
282/*************************************************
283* Defragment the free list *
284*************************************************/
/*************************************************
* Defragment the free list                       *
*************************************************/
void Pooling_Allocator::defrag_free_list() const
   {
   if(free_list.size() < 2) return;

   // Coalesce neighboring free-list entries that are contiguous in
   // memory AND lie inside the same underlying block. An entry that
   // is merged away is marked dead (length = 0) and all dead entries
   // are removed in one sweep at the end.
   for(u32bit j = 0; j != free_list.size(); j++)
      {
      if(free_list[j].length == 0)
         continue;

      // Fold the previous entry forward into this one
      if(j > 0 &&
         are_contiguous(free_list[j-1], free_list[j]) &&
         same_buffer(free_list[j-1], free_list[j]))
         {
         free_list[j].buf = free_list[j-1].buf;
         free_list[j].length += free_list[j-1].length;
         free_list[j-1].length = 0;
         }

      // Fold this entry forward into the next one
      if(j < free_list.size() - 1 &&
         are_contiguous(free_list[j], free_list[j+1]) &&
         same_buffer(free_list[j], free_list[j+1]))
         {
         free_list[j+1].buf = free_list[j].buf;
         free_list[j+1].length += free_list[j].length;
         free_list[j].length = 0;
         }
      }
   remove_empty_buffers(free_list);
   }
314
315/*************************************************
316* Find a block on the free list *
317*************************************************/
318void* Pooling_Allocator::find_free_block(u32bit n) const
319 {
320 void* retval = 0;
321
322 for(u32bit j = 0; j != free_list.size(); j++)
323 if(free_list[j].length >= n)
324 {
325 retval = free_list[j].buf;
326
327 if(free_list[j].length == n)
328 free_list.erase(free_list.begin() + j);
329 else if(free_list[j].length > n)
330 {
331 free_list[j].length -= n;
332 free_list[j].buf = ((byte*)free_list[j].buf) + n;
333 }
334 break;
335 }
336
337 return retval;
338 }
339
340/*************************************************
341* Allocation hook for debugging *
342*************************************************/
343void* Pooling_Allocator::alloc_hook(void* ptr, u32bit) const
344 {
345 return ptr;
346 }
347
348/*************************************************
349* Deallocation hook for debugging *
350*************************************************/
351void Pooling_Allocator::dealloc_hook(void*, u32bit) const
352 {
353 }
354
355/*************************************************
356* Run internal consistency checks *
357*************************************************/
358void Pooling_Allocator::consistency_check() const
359 {
360 for(u32bit j = 0; j != free_list.size(); j++)
361 {
362 const byte* byte_buf = (const byte*)free_list[j].buf;
363 const u32bit length = free_list[j].length;
364
365 for(u32bit k = 0; k != length; k++)
366 if(byte_buf[k])
367 throw Internal_Error("Pooling_Allocator: free list corrupted");
368 }
369 }
370
371}

Archive Download this file

Branches

Tags

Quick Links:     www.monotone.ca    -     Downloads    -     Documentation    -     Wiki    -     Code Forge    -     Build Status