afsmonitor: avoid double free on exit
[openafs.git] / src / mcas / gc.c
index 6023722..4268178 100644 (file)
@@ -34,6 +34,9 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <afsconfig.h>
+#include <afs/param.h>
+
 #include <assert.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -42,6 +45,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <unistd.h>
 #include "portable_defns.h"
 #include "gc.h"
+
 #include <afs/afsutil.h>
 
 /*#define MINIMAL_GC*/
@@ -50,7 +54,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 /* Recycled nodes are filled with this value if WEAK_MEM_ORDER. */
 #define INVALID_BYTE 0
-#define INITIALISE_NODES(_p,_c) memset((_p), INVALID_BYTE, (_c));
+#define INITIALISE_NODES(_p,_c) memset((_p), INVALID_BYTE, (_c))
 
 /* Number of unique block sizes we can deal with. Equivalently, the
  * number of unique object caches which can be created. */
@@ -125,6 +129,9 @@ static struct gc_global_st
     int nr_sizes;
     int blk_sizes[MAX_SIZES];
 
+    /* Tags (trace support): one label per registered block size. */
+    char *tags[MAX_SIZES];
+
     /* Registered epoch hooks. */
     int nr_hooks;
     hook_fn_t hook_fns[MAX_HOOKS];
@@ -195,6 +202,9 @@ static chunk_t *alloc_more_chunks(void)
     int i;
     chunk_t *h, *p;
 
+    ViceLog(11, ("GC: alloc_more_chunks alloc %lu chunks\n",
+        CHUNKS_PER_ALLOC));
+
     h = p = ALIGNED_ALLOC(CHUNKS_PER_ALLOC * sizeof(*h));
     if ( h == NULL ) MEM_FAIL(CHUNKS_PER_ALLOC * sizeof(*h));
 
@@ -347,10 +357,14 @@ static void gc_reclaim(void)
     unsigned long curr_epoch;
     chunk_t      *ch, *t;
     int           two_ago, three_ago, i, j;
+
+    ViceLog(11, ("GC: gc_reclaim enter\n"));
+
     /* Barrier to entering the reclaim critical section. */
     if ( gc_global.inreclaim || CASIO(&gc_global.inreclaim, 0, 1) ) return;
 
+    ViceLog(11, ("GC: gc_reclaim after inreclaim barrier\n"));
+
     /*
      * Grab first ptst structure *before* barrier -- prevent bugs
      * on weak-ordered architectures.
@@ -365,6 +379,9 @@ static void gc_reclaim(void)
         if ( (ptst->count > 1) && (ptst->gc->epoch != curr_epoch) ) goto out;
     }
 
+    ViceLog(11, ("GC: gc_reclaim all-threads see current epoch\n"));
+
     /*
      * Three-epoch-old garbage lists move to allocation lists.
      * Two-epoch-old garbage lists are cleaned out.
@@ -400,6 +417,30 @@ static void gc_reclaim(void)
             gc->garbage_tail[three_ago][i]->next = ch;
             gc->garbage_tail[three_ago][i] = t;
             t->next = t;
+
+            /* gc inst: compute and log the length of the returned list
+             * (counts chunks only; a byte total would need a blk_size
+             * multiplier). */
+            {
+                chunk_t *ch_head, *ch_next;
+                int r_len = 0;
+
+                ch_head = ch;
+                ch_next = ch;
+                do {
+                    r_len++;
+                } while (ch_next->next && (ch_next->next != ch_head)
+                         && (ch_next->next != ch_next)
+                         && (ch_next = ch_next->next));
+
+                ViceLog(11, ("GC: return %d chunks of size %d to "
+                             "gc_global.alloc[%d]\n",
+                             r_len, gc_global.blk_sizes[i], i));
+            }
+
             add_chunks_to_list(ch, gc_global.alloc[i]);
         }
 
@@ -414,11 +455,32 @@ static void gc_reclaim(void)
             do { for ( j = 0; j < t->i; j++ ) fn(our_ptst, t->blk[j]); }
             while ( (t = t->next) != ch );
 
+            /* gc inst: compute and log the length of the returned list
+             * (counts chunks only; a byte total would need a blk_size
+             * multiplier). */
+            {
+                chunk_t *ch_head, *ch_next;
+                int r_len = 0;
+
+                ch_head = ch;
+                ch_next = ch;
+                do {
+                    r_len++;
+                } while (ch_next->next && (ch_next->next != ch_head)
+                         && (ch_next->next != ch_next)
+                         && (ch_next = ch_next->next));
+
+                ViceLog(11, ("GC: return %d chunks to gc_global.free_chunks\n",
+                             r_len));
+            }
+
             add_chunks_to_list(ch, gc_global.free_chunks);
         }
     }
 
     /* Update current epoch. */
+    ViceLog(11, ("GC: gc_reclaim epoch transition (leaving %lu)\n",
+                                curr_epoch));
+
     WMB();
     gc_global.current = (curr_epoch+1) % NR_EPOCHS;
 
@@ -461,6 +523,12 @@ gc_get_blocksize(int alloc_id)
     return (gc_global.blk_sizes[alloc_id]);
 }
 
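+/* Return the tag registered for alloc_id by gc_add_allocator(). */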
+char *
+gc_get_tag(int alloc_id)
+{
+    return (gc_global.tags[alloc_id]);
+}
+
 static chunk_t *chunk_from_cache(gc_t *gc)
 {
     chunk_t *ch = gc->chunk_cache, *p = ch->next;
@@ -631,7 +699,7 @@ gc_t *gc_init(void)
 
 
 int
-gc_add_allocator(int alloc_size)
+gc_add_allocator(int alloc_size, char *tag)
 {
     int ni, i;
 
@@ -649,6 +717,7 @@ gc_add_allocator(int alloc_size)
     while ((ni = CASIO(&gc_global.nr_sizes, i, i + 1)) != i)
        i = ni;
     gc_global.blk_sizes[i] = alloc_size;
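+    /* Keep a private copy; callers may pass transient tag strings. */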
+    gc_global.tags[i] = strdup(tag);
     gc_global.alloc_size[i] = ALLOC_CHUNKS_PER_LIST;
     gc_global.alloc[i] = get_filled_chunks(ALLOC_CHUNKS_PER_LIST, alloc_size);
     return i;
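
For reference, a minimal sketch of the updated calling convention (the node
type node_t and the tag string below are illustrative, not from this change):

    /* Register a tagged allocator once at startup; the returned id
     * indexes gc_global.blk_sizes[] and gc_global.tags[]. */
    int aid = gc_add_allocator(sizeof (node_t), "skip-node");

    /* The tag can later be recovered wherever the allocator id is known. */
    ViceLog(11, ("GC: allocator %d: size %d, tag %s\n",
                 aid, gc_get_blocksize(aid), gc_get_tag(aid)));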