glibc-2.5-49.el5_5.5.src.rpm

2007-05-21  Jakub Jelinek  <jakub@redhat.com>

	* malloc/hooks.c (public_sET_STATe): Put all chunks into
	unsorted chunks and clear {fd,bk}_nextsize fields of largebin
	chunks.

2007-04-30  Ulrich Drepper  <drepper@redhat.com>
	    Jakub Jelinek  <jakub@redhat.com>

	[BZ #4349]
	* malloc/malloc.c: Keep separate list for first blocks on the bin
	lists with a given size.  This helps skipping over list elements
	we know won't fit, in two places.
	Inspired by a patch by Tomash Brechko <tomash.brechko@gmail.com>.
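
Before the diff, a note on the data structure it introduces: each large bin
stays a doubly linked list sorted by size, but the patch threads a second,
circular "nextsize" list through it that links only the first chunk of each
distinct size.  A minimal model of a free chunk (field names follow the
patch; the struct and its layout are a simplified stand-in, not the actual
glibc malloc_chunk):

#include <stddef.h>

/* Simplified model of a free chunk in a large bin.  */
struct chunk {
  size_t size;
  struct chunk *fd;            /* all free chunks, sorted largest-first */
  struct chunk *bk;
  struct chunk *fd_nextsize;   /* set only on the first chunk of each
                                  size: a ring of size-class leaders */
  struct chunk *bk_nextsize;
};

The sketches between the hunks below build on this model.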

--- libc/malloc/malloc.c	2008-05-09 03:52:41.000000000 -0400
+++ libc/malloc/malloc.c	2008-06-24 14:48:40.000000000 -0400
@@ -1781,6 +1781,10 @@
 
   struct malloc_chunk* fd;         /* double links -- used only if free. */
   struct malloc_chunk* bk;
+
+  /* Only used for large blocks: pointer to next larger size.  */
+  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
+  struct malloc_chunk* bk_nextsize;
 };
 
 
@@ -1881,7 +1885,7 @@
 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
 
 /* The smallest possible chunk */
-#define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))
+#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))
 
 /* The smallest size we can malloc is an aligned minimal chunk */
 
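The MIN_CHUNK_SIZE hunk above is what keeps the two new pointers free of
cost for small requests: since fd_nextsize/bk_nextsize are only ever
touched for large chunks, the minimum chunk need only cover the fields up
to their offset, and the smallest allocation does not grow.  A quick
standalone check of the idea (sizes in the comment assume an LP64 build
and the simplified model, not glibc's INTERNAL_SIZE_T):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for malloc_chunk with the two new trailing pointers.  */
struct chunk_model {
  size_t prev_size, size;
  void *fd, *bk;
  void *fd_nextsize, *bk_nextsize;
};

int main (void)
{
  /* On LP64 this prints 48 and 32: offsetof keeps MIN_CHUNK_SIZE at
     its pre-patch value despite the larger struct.  */
  printf ("sizeof chunk   = %zu\n", sizeof (struct chunk_model));
  printf ("MIN_CHUNK_SIZE = %zu\n",
          offsetof (struct chunk_model, fd_nextsize));
  return 0;
}
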
@@ -2081,6 +2085,24 @@
   else {                                                               \
     FD->bk = BK;                                                       \
     BK->fd = FD;                                                       \
+    if (!in_smallbin_range (P->size)				       \
+	&& __builtin_expect (P->fd_nextsize != NULL, 0)) {	       \
+      assert (P->fd_nextsize->bk_nextsize == P);		       \
+      assert (P->bk_nextsize->fd_nextsize == P);		       \
+      if (FD->fd_nextsize == NULL) {				       \
+	if (P->fd_nextsize == P)				       \
+	  FD->fd_nextsize = FD->bk_nextsize = FD;		       \
+	else {							       \
+	  FD->fd_nextsize = P->fd_nextsize;			       \
+	  FD->bk_nextsize = P->bk_nextsize;			       \
+	  P->fd_nextsize->bk_nextsize = FD;			       \
+	  P->bk_nextsize->fd_nextsize = FD;			       \
+	}							       \
+      }	else {							       \
+	P->fd_nextsize->bk_nextsize = P->bk_nextsize;		       \
+	P->bk_nextsize->fd_nextsize = P->fd_nextsize;		       \
+      }								       \
+    }								       \
   }                                                                    \
 }
 
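The unlink changes above are the subtle part: when chunk P leaves a bin,
the leaders' ring may need repair.  Rewritten as a plain function over the
model struct (a sketch of the macro's logic, with an illustrative name,
not glibc code):

/* Skip-list half of unlink: P is being removed, FD is its successor
   on the ordinary fd/bk list.  */
static void
unlink_nextsize (struct chunk *p, struct chunk *fd)
{
  if (p->fd_nextsize == NULL)
    return;                    /* P is not a size leader: nothing to fix */
  if (fd->fd_nextsize == NULL)
    {
      /* P led a size class with more members; FD inherits the role.  */
      if (p->fd_nextsize == p)
        fd->fd_nextsize = fd->bk_nextsize = fd;  /* only one size class */
      else
        {
          fd->fd_nextsize = p->fd_nextsize;
          fd->bk_nextsize = p->bk_nextsize;
          p->fd_nextsize->bk_nextsize = fd;
          p->bk_nextsize->fd_nextsize = fd;
        }
    }
  else
    {
      /* FD already leads the next size, so P's size class vanishes:
         splice P out of the ring.  */
      p->fd_nextsize->bk_nextsize = p->bk_nextsize;
      p->bk_nextsize->fd_nextsize = p->fd_nextsize;
    }
}
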
@@ -2786,7 +2808,31 @@
         /* lists are sorted */
         assert(p->bk == b ||
                (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
-      }
+
+	if (!in_smallbin_range(size))
+	  {
+	    if (p->fd_nextsize != NULL)
+	      {
+		if (p->fd_nextsize == p)
+		  assert (p->bk_nextsize == p);
+		else
+		  {
+		    if (p->fd_nextsize == first (b))
+		      assert (chunksize (p) < chunksize (p->fd_nextsize));
+		    else
+		      assert (chunksize (p) > chunksize (p->fd_nextsize));
+
+		    if (p == first (b))
+		      assert (chunksize (p) > chunksize (p->bk_nextsize));
+		    else
+		      assert (chunksize (p) < chunksize (p->bk_nextsize));
+		  }
+	      }
+	    else
+	      assert (p->bk_nextsize == NULL);
+	  }
+      } else if (!in_smallbin_range(size))
+	assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
       /* chunk is followed by a legal chain of inuse chunks */
       for (q = next_chunk(p);
            (q != av->top && inuse(q) &&
@@ -4135,6 +4181,11 @@
         unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
         av->last_remainder = remainder;
         remainder->bk = remainder->fd = unsorted_chunks(av);
+	if (!in_smallbin_range(remainder_size))
+	  {
+	    remainder->fd_nextsize = NULL;
+	    remainder->bk_nextsize = NULL;
+	  }
 
         set_head(victim, nb | PREV_INUSE |
 		 (av != &main_arena ? NON_MAIN_ARENA : 0));
@@ -4183,19 +4234,36 @@
           size |= PREV_INUSE;
           /* if smaller than smallest, bypass loop below */
 	  assert((bck->bk->size & NON_MAIN_ARENA) == 0);
-          if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
+	  if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
             fwd = bck;
             bck = bck->bk;
+
+	    victim->fd_nextsize = fwd->fd;
+	    victim->bk_nextsize = fwd->fd->bk_nextsize;
+	    fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
           }
           else {
 	    assert((fwd->size & NON_MAIN_ARENA) == 0);
-            while ((unsigned long)(size) < (unsigned long)(fwd->size)) {
-              fwd = fwd->fd;
-	      assert((fwd->size & NON_MAIN_ARENA) == 0);
-	    }
-            bck = fwd->bk;
+	    while ((unsigned long) size < fwd->size)
+	      {
+		fwd = fwd->fd_nextsize;
+		assert((fwd->size & NON_MAIN_ARENA) == 0);
+	      }
+
+	    if ((unsigned long) size == (unsigned long) fwd->size)
+	      /* Always insert in the second position.  */
+	      fwd = fwd->fd;
+	    else
+	      {
+		victim->fd_nextsize = fwd;
+		victim->bk_nextsize = fwd->bk_nextsize;
+		fwd->bk_nextsize = victim;
+		victim->bk_nextsize->fd_nextsize = victim;
+	      }
+	    bck = fwd->bk;
           }
-        }
+	} else
+	  victim->fd_nextsize = victim->bk_nextsize = victim;
       }
 
       mark_bin(av, victim_index);
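
On the insertion side (the hunk above), the sort now hops fd_nextsize from
leader to leader, and a chunk that ties an existing size is deliberately
placed second so the leader, and with it the ring, is never rerouted.  The
same logic as a standalone sketch (illustrative function name; in glibc
the nextsize fields of a non-leader are already NULL from the unsorted
list, so the explicit clearing below is only for self-containment):

/* Sorted insertion into a large bin kept largest-first; `bin` is the
   bin header and links to itself when the bin is empty.  */
static void
insert_large (struct chunk *bin, struct chunk *victim)
{
  struct chunk *fwd = bin->fd;
  struct chunk *bck;

  victim->fd_nextsize = victim->bk_nextsize = NULL;
  if (fwd == bin)
    {
      /* Empty bin: victim becomes a ring of one leader.  */
      victim->fd_nextsize = victim->bk_nextsize = victim;
      bck = bin;
    }
  else if (victim->size < bin->bk->size)
    {
      /* Strictly smaller than the smallest: new tail, new leader.
         Note the patch changed <= to < so an equal size falls through
         to the tie case below.  */
      fwd = bin;
      bck = bin->bk;
      victim->fd_nextsize = bin->fd;           /* ring wraps to largest */
      victim->bk_nextsize = bin->fd->bk_nextsize;
      bin->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
    }
  else
    {
      while (victim->size < fwd->size)         /* hop leader to leader */
        fwd = fwd->fd_nextsize;
      if (victim->size == fwd->size)
        fwd = fwd->fd;        /* tie: insert second, leader untouched */
      else
        {
          /* New size class: link victim into the leaders' ring.  */
          victim->fd_nextsize = fwd;
          victim->bk_nextsize = fwd->bk_nextsize;
          fwd->bk_nextsize = victim;
          victim->bk_nextsize->fd_nextsize = victim;
        }
      bck = fwd->bk;
    }
  /* Ordinary doubly linked splice, unchanged from the old code.  */
  victim->bk = bck;
  victim->fd = fwd;
  fwd->bk = victim;
  bck->fd = victim;
}
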
@@ -4213,21 +4281,25 @@
 
     /*
       If a large request, scan through the chunks of current bin in
-      sorted order to find smallest that fits.  This is the only step
-      where an unbounded number of chunks might be scanned without doing
-      anything useful with them. However the lists tend to be short.
+      sorted order to find smallest that fits.  Use the skip list for this.
     */
 
     if (!in_smallbin_range(nb)) {
       bin = bin_at(av, idx);
 
       /* skip scan if empty or largest chunk is too small */
-      if ((victim = last(bin)) != bin &&
-          (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {
+      if ((victim = first(bin)) != bin &&
+          (unsigned long)(victim->size) >= (unsigned long)(nb)) {
 
+	victim = victim->bk_nextsize;
         while (((unsigned long)(size = chunksize(victim)) <
                 (unsigned long)(nb)))
-          victim = victim->bk;
+          victim = victim->bk_nextsize;
+
+	/* Avoid removing the first entry for a size so that the skip
+	   list does not have to be rerouted.  */
+	if (victim != last(bin) && victim->size == victim->fd->size)
+	  victim = victim->fd;
 
         remainder_size = size - nb;
         unlink(victim, bck, fwd);
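
The allocation-side payoff is the scan above: instead of walking every
chunk backwards via bk, the search hops bk_nextsize and visits each
distinct size once.  As a standalone sketch (illustrative name, model
struct from the top of the patch):

/* Best-fit scan over the skip list: smallest chunk of at least nb
   bytes, preferring a same-size twin over the size leader so unlink
   never has to reroute the ring.  */
static struct chunk *
best_fit (struct chunk *bin, size_t nb)
{
  struct chunk *victim = bin->fd;          /* first == largest */
  if (victim == bin || victim->size < nb)
    return NULL;                           /* bin empty, or nothing fits */

  victim = victim->bk_nextsize;            /* jump to the smallest leader */
  while (victim->size < nb)
    victim = victim->bk_nextsize;          /* next larger size class */

  /* Keep the leader in place when a second chunk of this size exists.  */
  if (victim != bin->bk && victim->size == victim->fd->size)
    victim = victim->fd;
  return victim;
}
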
@@ -4249,6 +4321,11 @@
 	  remainder->fd = fwd;
 	  bck->fd = remainder;
 	  fwd->bk = remainder;
+	  if (!in_smallbin_range(remainder_size))
+	    {
+	      remainder->fd_nextsize = NULL;
+	      remainder->bk_nextsize = NULL;
+	    }
           set_head(victim, nb | PREV_INUSE |
 		   (av != &main_arena ? NON_MAIN_ARENA : 0));
           set_head(remainder, remainder_size | PREV_INUSE);
@@ -4318,9 +4395,7 @@
         remainder_size = size - nb;
 
         /* unlink */
-        bck = victim->bk;
-        bin->bk = bck;
-        bck->fd = bin;
+        unlink(victim, bck, fwd);
 
         /* Exhaust */
         if (remainder_size < MINSIZE) {
@@ -4345,7 +4420,11 @@
           /* advertise as last remainder */
           if (in_smallbin_range(nb))
             av->last_remainder = remainder;
-
+	  if (!in_smallbin_range(remainder_size))
+	    {
+	      remainder->fd_nextsize = NULL;
+	      remainder->bk_nextsize = NULL;
+	    }
           set_head(victim, nb | PREV_INUSE |
 		   (av != &main_arena ? NON_MAIN_ARENA : 0));
           set_head(remainder, remainder_size | PREV_INUSE);
@@ -4568,8 +4647,13 @@
 
       bck = unsorted_chunks(av);
       fwd = bck->fd;
-      p->bk = bck;
       p->fd = fwd;
+      p->bk = bck;
+      if (!in_smallbin_range(size))
+	{
+	  p->fd_nextsize = NULL;
+	  p->bk_nextsize = NULL;
+	}
       bck->fd = p;
       fwd->bk = p;
 
@@ -4729,6 +4813,11 @@
             unsorted_bin->fd = p;
             first_unsorted->bk = p;
 
+            if (!in_smallbin_range (size)) {
+	      p->fd_nextsize = NULL;
+	      p->bk_nextsize = NULL;
+	    }
+
             set_head(p, size | PREV_INUSE);
             p->bk = unsorted_bin;
             p->fd = first_unsorted;
--- libc/malloc/hooks.c.jj	2005-11-21 10:43:04.000000000 -0500
+++ libc/malloc/hooks.c	2008-05-09 03:54:00.000000000 -0400
@@ -595,8 +595,9 @@ public_sET_STATe(Void_t* msptr)
       assert(ms->av[2*i+3] == 0);
       first(b) = last(b) = b;
     } else {
-      if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
-			  largebin_index(chunksize(ms->av[2*i+3]))==i)) {
+      if(0 &&
+	 (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
+			   largebin_index(chunksize(ms->av[2*i+3]))==i))) {
 	first(b) = ms->av[2*i+2];
 	last(b) = ms->av[2*i+3];
 	/* Make sure the links to the bins within the heap are correct.  */
@@ -616,6 +617,17 @@ public_sET_STATe(Void_t* msptr)
       }
     }
   }
+  if (1) {
+    /* Clear fd_nextsize and bk_nextsize fields.  */
+    b = unsorted_chunks(&main_arena)->fd;
+    while (b != unsorted_chunks(&main_arena)) {
+      if (!in_smallbin_range(chunksize(b))) {
+	b->fd_nextsize = NULL;
+	b->bk_nextsize = NULL;
+      }
+      b = b->fd;
+    }
+  }
   mp_.sbrk_base = ms->sbrk_base;
   main_arena.system_mem = ms->sbrked_mem_bytes;
   mp_.trim_threshold = ms->trim_threshold;