// Gecode kernel: memory management for spaces (shared heap chunks, free lists)
1/* -*- mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- */
2/*
3 * Main authors:
4 * Christian Schulte <schulte@gecode.org>
5 *
6 * Contributing authors:
7 * Guido Tack <tack@gecode.org>
8 *
9 * Bugfixes provided by:
10 * Zandra Norman
11 *
12 * Copyright:
13 * Christian Schulte, 2002
14 * Guido Tack, 2004
15 *
16 * This file is part of Gecode, the generic constraint
17 * development environment:
18 * http://www.gecode.org
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 *
39 */
40
41#include <memory>
42
43namespace Gecode { namespace Kernel {
44
45 /// Memory chunk with size information
  /// Memory chunk with size information
  class MemoryChunk {
  public:
    /// Next chunk in the intrusive, singly-linked list (nullptr terminates)
    MemoryChunk* next;
    /// Size of the chunk in bytes
    size_t size;
  };
53
54 /// Memory chunk allocated from heap with proper alignment
  /// Memory chunk allocated from heap with proper alignment
  class HeapChunk : public MemoryChunk {
  public:
    /// Start of memory area inside chunk; declared as double for alignment,
    /// the actual area extends past the object (allocated over-sized)
    double area[1];
  };
60
61 /// Shared object for several memory areas
  /// Shared object for several memory areas
  class SharedMemory {
  private:
    /// The components for shared heap memory
    struct {
      /// How many heap chunks are available for caching
      unsigned int n_hc;
      /// A list of cached heap chunks
      HeapChunk* hc;
    } heap;
    /// A mutex for access (shared state may be used from several spaces)
    GECODE_KERNEL_EXPORT static Support::Mutex& m(void);
  public:
    /// Initialize with an empty chunk cache
    SharedMemory(void);
    /// Destructor (returns all cached chunks to the heap)
    ~SharedMemory(void);
    /// \name Heap management
    //@{
    /// Return heap chunk, preferably of size \a s, but at least of size \a l
    HeapChunk* alloc(size_t s, size_t l);
    /// Free heap chunk (or cache it for later reuse)
    void free(HeapChunk* hc);
    //@}
  };
86
87
88}}
89
90namespace Gecode {
91
92 /**
93 * \brief Base-class for freelist-managed objects
94 *
95 * Freelist-managed object must inherit from this class. The size
96 * of objects of subclasses is defined by the parameters in
97 * Gecode::MemoryConfig.
98 * \ingroup FuncMemSpace
99 */
  class FreeList {
  protected:
    /// Pointer to next freelist object (nullptr terminates the list)
    FreeList* _next;
  public:
    /// Use uninitialized (_next is deliberately not set)
    FreeList(void);
    /// Initialize with next freelist object \a n
    FreeList(FreeList* n);
    /// Return next freelist object
    FreeList* next(void) const;
    /// Return pointer to the next-link of this freelist object
    FreeList** nextRef(void);
    /// Set next freelist object to \a n
    void next(FreeList* n);
  };
116
117}
118
119namespace Gecode { namespace Kernel {
120
121 /// Manage memory for space
  /// Manage memory for space
  class MemoryManager {
  public:
    /// Constructor initialization
    MemoryManager(SharedMemory& sm);
    /// Constructor during cloning \a mm with shared memory \a sm and for a memory area for subscriptions of size \a s_sub
    MemoryManager(SharedMemory& sm, MemoryManager& mm, size_t s_sub);
    /// Release all allocated heap chunks
    void release(SharedMemory& sm);

  private:
    size_t cur_hcsz;  ///< Current heap chunk size
    HeapChunk* cur_hc; ///< Current heap chunk (list head; holds the subscription area)
    size_t requested; ///< Total amount of heap memory requested

    char* start; ///< Start of current heap area used for allocation
    size_t lsz;  ///< Size left for allocation

    /// Refill current heap area (outlined) issued by request of size \a s
    GECODE_KERNEL_EXPORT
    void alloc_refill(SharedMemory& sm, size_t s);
    /// Do the real work for refilling
    void alloc_fill(SharedMemory& sm, size_t s, bool first);

  public:
    /// Allocate memory of size \a s
    void* alloc(SharedMemory& sm, size_t s);
    /// Get the memory area for subscriptions
    void* subscriptions(void) const;

  private:
    /// Start of free lists, one per size class
    FreeList* fl[MemoryConfig::fl_size_max-MemoryConfig::fl_size_min+1];
    /// Refill free list (from slack chunks if available, else fresh memory)
    template<size_t> void fl_refill(SharedMemory& sm);
    /// Translate size to index in free list
    static size_t sz2i(size_t);
    /// Translate index in free list to size
    static size_t i2sz(size_t);

  public:
    /// Allocate free list element of size \a s
    template<size_t s>
    void* fl_alloc(SharedMemory& sm);
    /// Release all free list elements of size \a s between f and l (inclusive)
    template<size_t> void fl_dispose(FreeList* f, FreeList* l);

  private:
    /// Slack memory chunks (reused areas too large for any free list)
    MemoryChunk* slack;
  public:
    /// Store for reusal, if of sufficient size for free list
    void reuse(void* p, size_t s);
  };
175
176}}
177
178namespace Gecode { namespace Kernel {
179
180 /*
181 * Shared memory area
182 *
183 */
184
185 forceinline
186 SharedMemory::SharedMemory(void) {
187 heap.n_hc = 0;
188 heap.hc = nullptr;
189 }
190 forceinline
191 SharedMemory::~SharedMemory(void) {
192 while (heap.hc != nullptr) {
193 HeapChunk* hc = heap.hc;
194 heap.hc = static_cast<HeapChunk*>(hc->next);
195 Gecode::heap.rfree(hc);
196 }
197 }
198
199 forceinline HeapChunk*
200 SharedMemory::alloc(size_t s, size_t l) {
201 // To protect from exceptions from heap.ralloc()
202 Support::Lock guard(m());
203 while ((heap.hc != nullptr) && (heap.hc->size < l)) {
204 heap.n_hc--;
205 HeapChunk* hc = heap.hc;
206 heap.hc = static_cast<HeapChunk*>(hc->next);
207 Gecode::heap.rfree(hc);
208 }
209 HeapChunk* hc;
210 if (heap.hc == nullptr) {
211 assert(heap.n_hc == 0);
212 hc = static_cast<HeapChunk*>(Gecode::heap.ralloc(s));
213 hc->size = s;
214 } else {
215 heap.n_hc--;
216 hc = heap.hc;
217 heap.hc = static_cast<HeapChunk*>(hc->next);
218 }
219 return hc;
220 }
221 forceinline void
222 SharedMemory::free(HeapChunk* hc) {
223 Support::Lock guard(m());
224 if (heap.n_hc == MemoryConfig::n_hc_cache) {
225 Gecode::heap.rfree(hc);
226 } else {
227 heap.n_hc++;
228 hc->next = heap.hc; heap.hc = hc;
229 }
230 }
231
232
233}}
234
235namespace Gecode {
236
237 /*
238 * Freelists
239 *
240 */
241
  // Default construction leaves _next deliberately uninitialized
  forceinline
  FreeList::FreeList(void) {}
244
  // Construct with \a n as the next freelist object
  forceinline
  FreeList::FreeList(FreeList* n)
    : _next(n) {}
248
249 forceinline FreeList*
250 FreeList::next(void) const {
251 return _next;
252 }
253
254 forceinline FreeList**
255 FreeList::nextRef(void) {
256 return &_next;
257 }
258
259 forceinline void
260 FreeList::next(FreeList* n) {
261 _next = n;
262 }
263
264}
265
266namespace Gecode { namespace Kernel {
267
268 /*
269 * The active memory manager
270 *
271 */
272
273 forceinline size_t
274 MemoryManager::sz2i(size_t s) {
275 assert(s >= (MemoryConfig::fl_size_min << MemoryConfig::fl_unit_size));
276 assert(s <= (MemoryConfig::fl_size_max << MemoryConfig::fl_unit_size));
277 return (s >> MemoryConfig::fl_unit_size) - MemoryConfig::fl_size_min;
278 }
279
280 forceinline size_t
281 MemoryManager::i2sz(size_t i) {
282 return (i + MemoryConfig::fl_size_min) << MemoryConfig::fl_unit_size;
283 }
284
285
286 forceinline void*
287 MemoryManager::alloc(SharedMemory& sm, size_t sz) {
288 assert(sz > 0);
289 // Perform alignment
290 MemoryConfig::align(sz);
291 // Check whether sufficient memory left
292 if (sz > lsz)
293 alloc_refill(sm,sz);
294 lsz -= sz;
295 return start + lsz;
296 }
297
298 forceinline void*
299 MemoryManager::subscriptions(void) const {
300 return &cur_hc->area[0];
301 }
302
  forceinline void
  MemoryManager::alloc_fill(SharedMemory& sm, size_t sz, bool first) {
    // Adjust current heap chunk size: double it when much memory has been
    // requested relative to the chunk size or the request does not fit,
    // bounded by hcsz_max; never grow for the very first chunk
    if (((requested > MemoryConfig::hcsz_inc_ratio*cur_hcsz) ||
         (sz > cur_hcsz)) &&
        (cur_hcsz < MemoryConfig::hcsz_max) &&
        !first) {
      cur_hcsz <<= 1;
    }
    // Increment the size that it caters for the initial overhead
    // (the HeapChunk header fields; area[0] overlaps the usable memory)
    size_t overhead = sizeof(HeapChunk) - sizeof(double);
    sz += overhead;
    // Round size to next multiple of current heap chunk size
    size_t allocate = ((sz > cur_hcsz) ?
                       (static_cast<size_t>(sz / cur_hcsz) + 1) * cur_hcsz
                       : cur_hcsz);
    // Request a chunk of preferably size allocate, but at least size sz
    HeapChunk* hc = sm.alloc(allocate,sz);
    start = ptr_cast<char*>(&hc->area[0]);
    lsz = hc->size - overhead;
    // Link heap chunk, where the first heap chunk is kept in place:
    // cur_hc must stay stable as it holds the subscription area
    if (first) {
      requested = hc->size;
      hc->next = nullptr; cur_hc = hc;
    } else {
      requested += hc->size;
      hc->next = cur_hc->next; cur_hc->next = hc;
    }
#ifdef GECODE_MEMORY_CHECK
    // Scrub fresh memory to a known value for debugging
    for (char* c = start; c < (start+lsz); c++)
      *c = 0;
#endif
  }
336
337 forceinline
338 MemoryManager::MemoryManager(SharedMemory& sm)
339 : cur_hcsz(MemoryConfig::hcsz_min), requested(0), slack(nullptr) {
340 alloc_fill(sm,cur_hcsz,true);
341 for (size_t i = 0; i<MemoryConfig::fl_size_max-MemoryConfig::fl_size_min+1;
342 i++)
343 fl[i] = nullptr;
344 }
345
346 forceinline
347 MemoryManager::MemoryManager(SharedMemory& sm, MemoryManager& mm,
348 size_t s_sub)
349 : cur_hcsz(mm.cur_hcsz), requested(0), slack(nullptr) {
350 MemoryConfig::align(s_sub);
351 if ((mm.requested < MemoryConfig::hcsz_dec_ratio*mm.cur_hcsz) &&
352 (cur_hcsz > MemoryConfig::hcsz_min) &&
353 (s_sub*2 < cur_hcsz))
354 cur_hcsz >>= 1;
355 alloc_fill(sm,cur_hcsz+s_sub,true);
356 // Skip the memory area at the beginning for subscriptions
357 lsz -= s_sub;
358 start += s_sub;
359 for (size_t i = 0; i<MemoryConfig::fl_size_max-MemoryConfig::fl_size_min+1;
360 i++)
361 fl[i] = nullptr;
362 }
363
364 forceinline void
365 MemoryManager::release(SharedMemory& sm) {
366 // Release all allocated heap chunks
367 HeapChunk* hc = cur_hc;
368 do {
369 HeapChunk* t = hc; hc = static_cast<HeapChunk*>(hc->next);
370 sm.free(t);
371 } while (hc != nullptr);
372 }
373
374
375
376 /*
377 * Slack memory management
378 *
379 */
  forceinline void
  MemoryManager::reuse(void* p, size_t s) {
#ifdef GECODE_MEMORY_CHECK
    // Scrub reused memory to a known value for debugging
    {
      char* c = static_cast<char*>(p);
      char* e = c + s;
      while (c < e) {
        *c = 0; c++;
      }
    }
#endif

    // Align the area so it can hold a freelist object. std::align advances
    // p and shrinks aligned_s in place, returning nullptr if the area is
    // too small to fit the smallest freelist size after alignment
    size_t aligned_s = s;
    void* aligned_p = std::align(std::min(static_cast<size_t>(GECODE_MEMORY_ALIGNMENT), s),
                                 MemoryConfig::fl_size_min<<MemoryConfig::fl_unit_size,
                                 p, aligned_s);

    // Too small even for the smallest freelist class: silently discard
    if (aligned_p==nullptr ||
        aligned_s < (MemoryConfig::fl_size_min<<MemoryConfig::fl_unit_size))
      return;
    if (aligned_s > (MemoryConfig::fl_size_max<<MemoryConfig::fl_unit_size)) {
      // Too large for any free list: keep as slack chunk for fl_refill
      MemoryChunk* rc = static_cast<MemoryChunk*>(aligned_p);
      rc->next = slack;
      rc->size = aligned_s;
      slack = rc;
    } else {
      // Fits a free list: prepend to the list of its size class
      size_t i = sz2i(aligned_s);
      FreeList* f = static_cast<FreeList*>(aligned_p);
      f->next(fl[i]); fl[i]=f;
    }
  }
411
412
413 /*
414 * Freelist management
415 *
416 */
417
418 template<size_t s>
419 forceinline void*
420 MemoryManager::fl_alloc(SharedMemory& sm) {
421 size_t i = sz2i(s);
422 FreeList* f = fl[i];
423 if (f == nullptr) {
424 fl_refill<s>(sm); f = fl[i];
425 }
426 FreeList* n = f->next();
427 fl[i] = n;
428 return f;
429 }
430
431 template<size_t s>
432 forceinline void
433 MemoryManager::fl_dispose(FreeList* f, FreeList* l) {
434 size_t i = sz2i(s);
435#ifdef GECODE_AUDIT
436 FreeList* cur = f;
437 while (cur != l) {
438 assert(cur->next());
439 cur = cur->next();
440 }
441#endif
442 l->next(fl[i]); fl[i] = f;
443 }
444
445 template<size_t sz>
446 void
447 MemoryManager::fl_refill(SharedMemory& sm) {
448 // Try to acquire memory from slack
449 if (slack != nullptr) {
450 MemoryChunk* m = slack;
451 slack = nullptr;
452 do {
453 char* block = ptr_cast<char*>(m);
454 size_t s = m->size;
455 assert(s >= sz);
456 m = m->next;
457 fl[sz2i(sz)] = ptr_cast<FreeList*>(block);
458 while (s >= 2*sz) {
459 ptr_cast<FreeList*>(block)->next(ptr_cast<FreeList*>(block+sz));
460 block += sz;
461 s -= sz;
462 }
463 ptr_cast<FreeList*>(block)->next(nullptr);
464 } while (m != nullptr);
465 } else {
466 char* block = static_cast<char*>(alloc(sm,MemoryConfig::fl_refill*sz));
467 fl[sz2i(sz)] = ptr_cast<FreeList*>(block);
468 int i = MemoryConfig::fl_refill-2;
469 do {
470 ptr_cast<FreeList*>(block+static_cast<unsigned int>(i)*sz)
471 ->next(ptr_cast<FreeList*>(block+
472 (static_cast<unsigned int>(i)+1)*sz));
473 } while (--i >= 0);
474 ptr_cast<FreeList*>(block+
475 (MemoryConfig::fl_refill-1)*sz)->next
476 (ptr_cast<FreeList*>(nullptr));
477 }
478 }
479
480}}
481
482// STATISTICS: kernel-memory