/*
 *
 * Copyright (c) 2007-2016 The University of Waikato, Hamilton, New Zealand.
 * All rights reserved.
 *
 * This file is part of libtrace.
 *
 * This code has been developed by the University of Waikato WAND
 * research group. For further information please see http://www.wand.net.nz/
 *
 * libtrace is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * libtrace is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 */
#include "config.h"
#include "object_cache.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>


// POSIX thread-specific storage (pthread keys) is most likely slower than
// __thread, but it provides destructors, so we use a combination of the two
// here.
// Note: Apple's implementation of TLS means that the memory is unavailable /
// has been zeroed by the time the pthread destructor is called.
struct local_cache {
        libtrace_ocache_t *oc; // The ocache this thread cache belongs to
        size_t total;          // Capacity of the cache array
        size_t used;           // Number of objects currently held
        void **cache;          // Array of cached (recycled) objects
        bool invalid;          // Set once this cache has been unregistered
};

struct mem_stats {
        struct memfail {
                uint64_t cache_hit;
                uint64_t ring_hit;
                uint64_t miss;
                uint64_t recycled;
        } readbulk, read, write, writebulk;
};

#ifdef ENABLE_MEM_STATS
extern __thread struct mem_stats mem_hits;
#endif

struct local_caches {
        size_t t_mem_caches_used;
        size_t t_mem_caches_total;
        struct local_cache *t_mem_caches;
};

static pthread_key_t memory_destructor_key;
static pthread_once_t memory_destructor_once = PTHREAD_ONCE_INIT;
static inline struct local_caches *get_local_caches(void);

/**
 * @brief unregister_thread assumes we do NOT hold the spin lock
 */
static inline void unregister_thread(struct local_cache *lc) {
        size_t i;
        bool found = false;
        if (lc->invalid)
                fprintf(stderr, "Already free'd the thread cache!!\n");
        pthread_spin_lock(&lc->oc->spin);
        // Remove it from our thread list
        for (i = 0; i < lc->oc->nb_thread_list; ++i) {
                if (lc->oc->thread_list[i] == lc) {
                        --lc->oc->nb_thread_list;
                        lc->oc->thread_list[i] = lc->oc->thread_list[lc->oc->nb_thread_list];
                        lc->oc->thread_list[lc->oc->nb_thread_list] = NULL;
                        found = true;
                        break;
                }
        }
        if (!found) {
                fprintf(stderr, "Attempted to unregister a thread with an"
                        " ocache that had never registered this thread. Ignoring.\n");
                pthread_spin_unlock(&lc->oc->spin);
                return;
        }
        lc->invalid = true;

        if (lc->oc->max_allocations) {
                // Return the cached objects to the main ring buffer
                libtrace_ringbuffer_swrite_bulk(&lc->oc->rb, lc->cache, lc->used, lc->used);
        } else {
                // No allocation limit is enforced, so simply free them
                for (i = 0; i < lc->used; ++i) {
                        lc->oc->free(lc->cache[i]);
                }
        }
        pthread_spin_unlock(&lc->oc->spin);
}

/**
 * @brief register_thread assumes we do NOT hold the spin lock
 */
static inline void register_thread(libtrace_ocache_t *oc, struct local_cache *lc) {
        lc->invalid = false;
        pthread_spin_lock(&oc->spin);
        if (oc->nb_thread_list == oc->max_nb_thread_list) {
                oc->max_nb_thread_list += 0x10;
                oc->thread_list = realloc(oc->thread_list, sizeof(void *) * oc->max_nb_thread_list);
        }
        oc->thread_list[oc->nb_thread_list] = lc;
        ++oc->nb_thread_list;
        pthread_spin_unlock(&oc->spin);
}

static void destroy_memory_caches(void *tlsaddr) {
        size_t a;
        struct local_caches *lcs = tlsaddr;

        for (a = 0; a < lcs->t_mem_caches_used; ++a) {
                // unregister_thread() writes any cached objects back to the
                // main buffer (or frees them), so only the array remains
                unregister_thread(&lcs->t_mem_caches[a]);
                free(lcs->t_mem_caches[a].cache);
        }
        free(lcs->t_mem_caches);
        lcs->t_mem_caches = NULL;
        free(lcs);
}

static void once_memory_cache_key_init(void) {
        ASSERT_RET(pthread_key_create(&memory_destructor_key, &destroy_memory_caches), == 0);
}

/**
 * Adds more space to our mem_caches
 */
static void resize_memory_caches(struct local_caches *lcs) {
        if (lcs->t_mem_caches_total <= 0) {
                fprintf(stderr, "Expected lcs->t_mem_caches_total to be greater than 0 in resize_memory_caches()\n");
                return;
        }
        lcs->t_mem_caches_total += 0x10;
        lcs->t_mem_caches = realloc(lcs->t_mem_caches,
                                    lcs->t_mem_caches_total * sizeof(struct local_cache));
}

/* Get TLS for the list of local_caches */
static inline struct local_caches *get_local_caches(void) {
#if HAVE_TLS
        static __thread struct local_caches *lcs = NULL;
        if (lcs) {
                return lcs;
        }
#else
        struct local_caches *lcs;
        pthread_once(&memory_destructor_once, &once_memory_cache_key_init);
        if ((lcs = pthread_getspecific(memory_destructor_key)) != 0) {
                return lcs;
        }
#endif
        /* This thread has not been used with a memory pool before */
        /* Allocate our TLS */
        lcs = calloc(1, sizeof(struct local_caches));
        if (!lcs) {
                fprintf(stderr, "Unable to allocate memory for lcs in get_local_caches()\n");
                return NULL;
        }
        /* Hook into pthreads to destroy this when the thread ends */
        pthread_once(&memory_destructor_once, &once_memory_cache_key_init);
        pthread_setspecific(memory_destructor_key, (void *) lcs);
        lcs->t_mem_caches_total = 0x10;
        lcs->t_mem_caches = calloc(0x10, sizeof(struct local_cache));
        if (!lcs->t_mem_caches) {
                fprintf(stderr, "Unable to allocate memory for lcs->t_mem_caches in get_local_caches()\n");
                return NULL;
        }
        return lcs;
}

static inline struct local_cache *find_cache(libtrace_ocache_t *oc) {
        size_t i;
        struct local_cache *lc = NULL;
        struct local_caches *lcs = get_local_caches();

        if (!lcs) // TLS allocation failed
                return NULL;

        for (i = 0; i < lcs->t_mem_caches_used; ++i) {
                if (lcs->t_mem_caches[i].oc == oc) {
                        lc = &lcs->t_mem_caches[i];
                        break;
                }
        }

        if (!oc->thread_cache_size)
                return NULL;

        // Create a cache
        if (!lc) {
                if (lcs->t_mem_caches_used == lcs->t_mem_caches_total)
                        resize_memory_caches(lcs);
                lcs->t_mem_caches[lcs->t_mem_caches_used].oc = oc;
                lcs->t_mem_caches[lcs->t_mem_caches_used].used = 0;
                lcs->t_mem_caches[lcs->t_mem_caches_used].total = oc->thread_cache_size;
                lcs->t_mem_caches[lcs->t_mem_caches_used].cache = malloc(sizeof(void *) * oc->thread_cache_size);
                lcs->t_mem_caches[lcs->t_mem_caches_used].invalid = false;
                lc = &lcs->t_mem_caches[lcs->t_mem_caches_used];
                // Register it with the underlying ring_buffer
                register_thread(lc->oc, lc);
                ++lcs->t_mem_caches_used;
        }

        if (lc->invalid) {
                fprintf(stderr, "lc cache is invalid in find_cache()\n");
                return NULL;
        }
        return lc;
}

/**
 * Creates an object cache, that is, a pool of dynamically allocated and
 * recycled objects of a fixed size. This should be faster than malloc and
 * free. The alloc and free methods are supplied by the user and are used
 * when no recycled objects are available, or to tidy the final results.
 *
 * The performance of these pools will decrease if thread caches are used,
 * as this results in a list to look up per thread. The pool is added to
 * this list when first encountered; these entries persist until the thread
 * exits.
 *
 * NOTE: If limit_size is true, do not attempt to 'free' back any objects
 * that were not created by this pool, otherwise the 'free' might deadlock.
 * Also be cautious when picking the buffer size: up to
 * thread_cache_size*(threads-1) objects could be unusable at any given time
 * if they are stuck in thread local caches.
 *
 * @param oc A pointer to the object cache structure which is to be initialised.
 * @param alloc The allocation method, must not be NULL. [void *alloc()]
 * @param free The free method used to destroy packets. [void free(void *obj)]
 * @param thread_cache_size A small cache kept on a per-thread basis; this can
 * be 0, but that should only be done if bulk reads of packets are being
 * performed or contention is minimal.
 * @param buffer_size The number of packets to be stored in the main buffer.
 * @param limit_size If true, no more objects than buffer_size will be
 * allocated and reads will block (free never should). Otherwise packets can
 * be freely allocated upon request and are free'd if there is not enough
 * space for them.
 * @return If successful returns 0, otherwise -1.
 */
DLLEXPORT int libtrace_ocache_init(libtrace_ocache_t *oc, void *(*alloc)(void),
                                   void (*free)(void *),
                                   size_t thread_cache_size,
                                   size_t buffer_size, bool limit_size) {

        if (buffer_size <= 0) {
                fprintf(stderr, "Zero buffer_size passed into libtrace_ocache_init()\n");
                return -1;
        }
        if (!alloc) {
                fprintf(stderr, "NULL alloc passed into libtrace_ocache_init()\n");
                return -1;
        }
        if (!free) {
                fprintf(stderr, "NULL free method passed into libtrace_ocache_init()\n");
                return -1;
        }
        if (libtrace_ringbuffer_init(&oc->rb, buffer_size, LIBTRACE_RINGBUFFER_BLOCKING) != 0) {
                return -1;
        }
        oc->alloc = alloc;
        oc->free = free;
        oc->current_allocations = 0;
        oc->thread_cache_size = thread_cache_size;
        oc->nb_thread_list = 0;
        oc->max_nb_thread_list = 0x10;
        oc->thread_list = calloc(0x10, sizeof(void *));
        if (oc->thread_list == NULL) {
                libtrace_ringbuffer_destroy(&oc->rb);
                return -1;
        }
        pthread_spin_init(&oc->spin, 0);
        if (limit_size)
                oc->max_allocations = buffer_size;
        else
                oc->max_allocations = 0;
        return 0;
}
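
/*
 * Example usage (a minimal sketch; my_alloc, my_free and the sizes chosen
 * here are illustrative placeholders, not part of the libtrace API):
 *
 *         static void *my_alloc(void) { return malloc(2048); }
 *         static void my_free(void *o) { free(o); }
 *
 *         libtrace_ocache_t oc;
 *         void *pkt;
 *         if (libtrace_ocache_init(&oc, my_alloc, my_free, 64, 256, true) == 0) {
 *                 libtrace_ocache_alloc(&oc, &pkt, 1, 1); // take one object
 *                 // ... use pkt ...
 *                 libtrace_ocache_free(&oc, &pkt, 1, 1);  // recycle it
 *                 libtrace_ocache_destroy(&oc);
 *         }
 */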

/**
 * Destroys the object cache. Call this only once all memory has
 * been free'd back and no more accesses will be made.
 *
 * @return Returns the number of packets outstanding, or extra objects
 * received. Ideally this should be zero (0); otherwise some form of memory
 * leak is likely present. Currently this is only implemented in the case
 * limit_size is true.
 */
DLLEXPORT int libtrace_ocache_destroy(libtrace_ocache_t *oc) {
        void *ele;
        int leaked;

        while (oc->nb_thread_list)
                unregister_thread(oc->thread_list[0]);

        pthread_spin_lock(&oc->spin);
        while (libtrace_ringbuffer_try_read(&oc->rb, &ele)) {
                oc->free(ele);
                if (oc->max_allocations)
                        --oc->current_allocations;
        }
        pthread_spin_unlock(&oc->spin);

        // Capture the leak count now; libtrace_zero_ocache() clears it below
        leaked = (int) oc->current_allocations;
        if (leaked)
                fprintf(stderr, "OCache destroyed, leaking %d packets!!\n", leaked);

        libtrace_ringbuffer_destroy(&oc->rb);
        pthread_spin_destroy(&oc->spin);
        free(oc->thread_list);
        libtrace_zero_ocache(oc);
        return leaked;
}

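/**
 * Takes up to nb_buffers recycled objects, preferring this thread's local
 * cache and refilling it from the shared ring buffer as needed. This never
 * calls the user-supplied alloc(); it blocks only until min_nb_buffers
 * objects have been obtained.
 */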
static inline size_t libtrace_ocache_alloc_cache(libtrace_ocache_t *oc, void *values[], size_t nb_buffers, size_t min_nb_buffers,
                                                 struct local_cache *lc) {
        libtrace_ringbuffer_t *rb = &oc->rb;
        size_t i;

        // We have enough cached!! Yay
        if (nb_buffers <= lc->used) {
                // Copy all from the cache
                memcpy(values, &lc->cache[lc->used - nb_buffers], sizeof(void *) * nb_buffers);
                lc->used -= nb_buffers;
#ifdef ENABLE_MEM_STATS
                mem_hits.read.cache_hit += nb_buffers;
                mem_hits.readbulk.cache_hit += 1;
#endif
                return nb_buffers;
        }
        // Cache is not big enough; try to read all from the ring buffer
        else if (nb_buffers > lc->total) {
                i = libtrace_ringbuffer_sread_bulk(rb, values, nb_buffers, min_nb_buffers);
#ifdef ENABLE_MEM_STATS
                if (i)
                        mem_hits.readbulk.ring_hit += 1;
                else
                        mem_hits.readbulk.miss += 1;
                mem_hits.read.ring_hit += i;
#endif
        } else { // Not enough cached
                // Empty the cache, refill it, and then see what we're left with
                i = lc->used;
                memcpy(values, lc->cache, sizeof(void *) * lc->used);
#ifdef ENABLE_MEM_STATS
                mem_hits.read.cache_hit += i;
#endif

                // Make sure we still meet the minimum requirement
                if (i < min_nb_buffers)
                        lc->used = libtrace_ringbuffer_sread_bulk(rb, lc->cache, lc->total, min_nb_buffers - i);
                else
                        lc->used = libtrace_ringbuffer_sread_bulk(rb, lc->cache, lc->total, 0);
#ifdef ENABLE_MEM_STATS
                if (lc->used == lc->total)
                        mem_hits.readbulk.ring_hit += 1;
                else
                        mem_hits.readbulk.miss += 1;
                mem_hits.read.ring_hit += lc->used;
#endif
        }

        // Try to fill the remainder from the refilled cache
        if (i < nb_buffers && lc->used) {
                size_t remaining;
                remaining = MIN(lc->used, nb_buffers - i);
                memcpy(&values[i], &lc->cache[lc->used - remaining], sizeof(void *) * remaining);
                lc->used -= remaining;
                i += remaining;
        }
#ifdef ENABLE_MEM_STATS
        mem_hits.read.miss += nb_buffers - i;
#endif
        if (i < min_nb_buffers) {
                fprintf(stderr, "Unable to fill remaining cache in libtrace_ocache_alloc_cache()\n");
                return ~0U;
        }
        return i;
}

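/**
 * Allocates nb_buffers objects from the object cache, drawing first from
 * this thread's local cache, then from the shared ring buffer, and finally
 * from the user-supplied alloc() while the allocation limit (if any)
 * permits. Blocks until at least min_nb_buffers objects have been obtained.
 *
 * @return The number of objects placed into values[], or ~0U on error.
 */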
DLLEXPORT size_t libtrace_ocache_alloc(libtrace_ocache_t *oc, void *values[], size_t nb_buffers, size_t min_nb_buffers) {
        struct local_cache *lc = find_cache(oc);
        size_t i;
        size_t min;
        bool try_alloc = !(oc->max_allocations && oc->max_allocations <= oc->current_allocations);

        if (oc->max_allocations) {
                if (nb_buffers >= oc->max_allocations) {
                        fprintf(stderr, "Expected nb_buffers to be less than the object cache "
                                "max allocation in libtrace_ocache_alloc()\n");
                        return ~0U;
                }
        }
        min = try_alloc ? 0 : min_nb_buffers;
        if (lc)
                i = libtrace_ocache_alloc_cache(oc, values, nb_buffers, min, lc);
        else
                i = libtrace_ringbuffer_sread_bulk(&oc->rb, values, nb_buffers, min);

        if (try_alloc) {
                size_t nb;

                // Try to alloc the rest
                if (oc->max_allocations) {
                        pthread_spin_lock(&oc->spin);
                        nb = MIN(oc->max_allocations - oc->current_allocations, nb_buffers - i);
                        oc->current_allocations += nb;
                        pthread_spin_unlock(&oc->spin);
                        nb += i;
                } else {
                        nb = nb_buffers;
                }

                for (; i < nb; ++i) {
                        values[i] = (*oc->alloc)();
                        if (!values[i]) {
                                fprintf(stderr, "Unable to alloc memory for values[%zu] in libtrace_ocache_alloc()\n", i);
                                return ~0U;
                        }
                }

                if (i != nb) {
                        fprintf(stderr, "Expected i == nb in libtrace_ocache_alloc()\n");
                        return ~0U;
                }
                // Still need to wait for more
                if (nb < min_nb_buffers) {
                        if (lc)
                                i += libtrace_ocache_alloc_cache(oc, &values[nb], nb_buffers - nb, min_nb_buffers - nb, lc);
                        else
                                i += libtrace_ringbuffer_sread_bulk(&oc->rb, &values[nb], nb_buffers - nb, min_nb_buffers - nb);
                }
        }
        if (i < min_nb_buffers) {
                fprintf(stderr, "Failed to allocate minimum number of buffers for libtrace "
                        "object cache in libtrace_ocache_alloc()\n");
                return ~0U;
        }
        return i;
}

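/**
 * Stores up to nb_buffers objects, preferring this thread's local cache and
 * spilling to the shared ring buffer once the cache fills. This never calls
 * the user-supplied free(); it blocks only until min_nb_buffers objects
 * have been stored.
 */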
static inline size_t libtrace_ocache_free_cache(libtrace_ocache_t *oc, void *values[], size_t nb_buffers, size_t min_nb_buffers,
                                                struct local_cache *lc) {
        libtrace_ringbuffer_t *rb = &oc->rb;
        size_t i;

        // We have enough cache space!! Yay
        if (nb_buffers <= lc->total - lc->used) {
                // Copy all to the cache
                memcpy(&lc->cache[lc->used], values, sizeof(void *) * nb_buffers);
                lc->used += nb_buffers;
#ifdef ENABLE_MEM_STATS
                mem_hits.write.cache_hit += nb_buffers;
                mem_hits.writebulk.cache_hit += 1;
#endif
                return nb_buffers;
        }
        // Cache is not big enough; try to write all to the ring buffer
        else if (nb_buffers > lc->total) {
                i = libtrace_ringbuffer_swrite_bulk(rb, values, nb_buffers, min_nb_buffers);
#ifdef ENABLE_MEM_STATS
                if (i)
                        mem_hits.writebulk.ring_hit += 1;
                else
                        mem_hits.writebulk.miss += 1;
                mem_hits.write.ring_hit += i;
#endif
        } else { // Not enough cache space now, but there may be later
                // Fill the cache, empty it, and then see what we're left with
                i = (lc->total - lc->used);
                memcpy(&lc->cache[lc->used], values, sizeof(void *) * i);
#ifdef ENABLE_MEM_STATS
                mem_hits.write.cache_hit += i;
#endif

                // Make sure we still meet the minimum requirement
                if (i < min_nb_buffers)
                        lc->used = lc->total - libtrace_ringbuffer_swrite_bulk(rb, lc->cache, lc->total, min_nb_buffers - i);
                else
                        lc->used = lc->total - libtrace_ringbuffer_swrite_bulk(rb, lc->cache, lc->total, 0);

                // Reorganise: move the remaining full slots to the front
                if (lc->used)
                        memmove(lc->cache, &lc->cache[lc->total - lc->used], sizeof(void *) * lc->used);

#ifdef ENABLE_MEM_STATS
                if (lc->used)
                        mem_hits.writebulk.miss += 1;
                else
                        mem_hits.writebulk.ring_hit += 1;
                mem_hits.write.ring_hit += lc->total - lc->used;
#endif
        }

        // Try to empty the remainder into the cache
        if (i < nb_buffers && lc->used != lc->total) {
                size_t remaining;
                remaining = MIN(lc->total - lc->used, nb_buffers - i);
                memcpy(&lc->cache[lc->used], &values[i], sizeof(void *) * remaining);
                lc->used += remaining;
                i += remaining;
        }
#ifdef ENABLE_MEM_STATS
        mem_hits.write.miss += nb_buffers - i;
#endif
        return i;
}

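/**
 * Returns nb_buffers objects to the object cache, placing them into this
 * thread's local cache and/or the shared ring buffer. If the cache has no
 * allocation limit, any leftover objects needed to meet min_nb_buffers are
 * destroyed with the user-supplied free().
 *
 * @return The number of objects accepted from values[], or ~0U on error.
 */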
DLLEXPORT size_t libtrace_ocache_free(libtrace_ocache_t *oc, void *values[], size_t nb_buffers, size_t min_nb_buffers) {
        struct local_cache *lc = find_cache(oc);
        size_t i;
        size_t min;

        if (oc->max_allocations) {
                if (nb_buffers >= oc->max_allocations) {
                        fprintf(stderr, "Expected nb_buffers to be less than the object cache "
                                "max allocation in libtrace_ocache_free()\n");
                        return ~0U;
                }
        }
        min = oc->max_allocations ? min_nb_buffers : 0;
        if (lc)
                i = libtrace_ocache_free_cache(oc, values, nb_buffers, min, lc);
        else
                i = libtrace_ringbuffer_swrite_bulk(&oc->rb, values, nb_buffers, min);

        if (!oc->max_allocations) {
                // No limit is enforced, so free the remainder normally
                for (; i < min_nb_buffers; ++i) {
                        oc->free(values[i]);
                }
        }
        return i;
}

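/**
 * Resets an ocache structure to a zeroed state. Note that this does not
 * free any outstanding objects or the thread list; it simply zeroes the
 * ring buffer structure and clears every field.
 */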
DLLEXPORT void libtrace_zero_ocache(libtrace_ocache_t *oc) {
        libtrace_zero_ringbuffer(&oc->rb);
        oc->thread_cache_size = 0;
        oc->alloc = NULL;
        oc->free = NULL;
        oc->current_allocations = 0;
        oc->max_allocations = 0;
        oc->nb_thread_list = 0;
        oc->max_nb_thread_list = 0;
        oc->thread_list = NULL;
}

/**
 * @brief libtrace_ocache_unregister_thread removes a thread from an ocache.
 * @param oc The ocache to remove this thread from; this will free any
 * packets held in the TLS cache.
 */
DLLEXPORT void libtrace_ocache_unregister_thread(libtrace_ocache_t *oc) {
        size_t i;
        struct local_caches *lcs = get_local_caches();
        struct local_cache *lc = find_cache(oc);

        if (lc) {
                for (i = 0; i < lcs->t_mem_caches_used; ++i) {
                        if (&lcs->t_mem_caches[i] == lc) {
                                // Free the cache against the ocache
                                unregister_thread(&lcs->t_mem_caches[i]);
                                free(lcs->t_mem_caches[i].cache);
                                // And remove it from the thread itself
                                --lcs->t_mem_caches_used;
                                lcs->t_mem_caches[i] = lcs->t_mem_caches[lcs->t_mem_caches_used];
                                memset(&lcs->t_mem_caches[lcs->t_mem_caches_used], 0, sizeof(struct local_cache));
                        }
                }
        }
}