struct malloc_chunk
{
  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;                /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize;       /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if unallocated (P clear)  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                     |A|M|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_size() bytes)                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             (size of chunk, but used for application data)    |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of next chunk, in bytes                |A|0|1|
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
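To see these header fields from user code, here is a minimal sketch (glibc on 64-bit Linux assumed; it peeks one word before the returned pointer, so it is illustrative only and not portable):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p = malloc (100);
  size_t raw = ((size_t *) p)[-1];          /* mchunk_size of this chunk */
  printf ("size=%zu A=%zu M=%zu P=%zu\n",
          raw & ~(size_t) 7,                /* size without the flag bits */
          (raw >> 2) & 1,                   /* A: NON_MAIN_ARENA */
          (raw >> 1) & 1,                   /* M: IS_MMAPPED */
          raw & 1);                         /* P: PREV_INUSE */
  free (p);
  return 0;
}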
struct malloc_state
{
  /* Serialize access.  */
  __libc_lock_define (, mutex);

  /* Flags (formerly in max_fast).  */
  int flags;

  /* Set if the fastbin chunks contain recently inserted free blocks.  */
  /* Note this is a bool but not all targets support atomics on booleans.  */
  int have_fastchunks;

  /* Fastbins */
  mfastbinptr fastbinsY[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;

  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;

  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};
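As a rough sketch of how chunk sizes map onto fastbinsY and bins on 64-bit builds, the helpers below print a few index values. fastbin_index_64 and smallbin_index_64 are my own names, mirroring the glibc fastbin_index/smallbin_index macros under the assumption SIZE_SZ == 8:

#include <stdio.h>
#include <stddef.h>

/* Mirrors fastbin_index(sz) for SIZE_SZ == 8: (sz >> 4) - 2.  */
static unsigned fastbin_index_64 (size_t sz)  { return (unsigned) (sz >> 4) - 2; }
/* Mirrors smallbin_index(sz) for SMALLBIN_WIDTH == 16: sz >> 4.  */
static unsigned smallbin_index_64 (size_t sz) { return (unsigned) (sz >> 4); }

int main (void)
{
  /* Chunk sizes (not request sizes), stepping by the 16-byte alignment.  */
  for (size_t sz = 32; sz <= 128; sz += 16)
    printf ("chunk size %3zu -> fastbin %u, small bin %u\n",
            sz, fastbin_index_64 (sz), smallbin_index_64 (sz));
  return 0;
}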
/* There are several instances of this struct ("arenas") in this
   malloc.  If you are adapting this malloc in a way that does NOT use
   a static or mmapped malloc_state, you MUST explicitly zero-fill it
   before using.  This malloc relies on the property that malloc_state
   is initialized to all zeroes (as is true of C statics).  */

static struct malloc_state main_arena =
{
  .mutex = _LIBC_LOCK_INITIALIZER,
  .next = &main_arena,
  .attached_threads = 1
};
Arena expansion
How does the set of arenas grow? We have already seen the arena_get function inside malloc:
#define arena_get(ptr, size) do { \
ptr = thread_arena; \
arena_lock (ptr, size); \
} while (0)
#define arena_lock(ptr, size) do { \
if (ptr) \
__libc_lock_lock (ptr->mutex); \
else \
ptr = arena_get2 ((size), NULL); \
} while (0)
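As the macros show, each thread caches its arena in thread_arena and only falls back to arena_get2 (which creates a new arena or reuses an existing one, as the snippets below show) when the cache is empty. Here is a simplified, self-contained model of that pattern, not glibc code; my_arena_get, my_arena_get2, and struct arena are invented for the sketch:

#include <pthread.h>
#include <stdlib.h>

struct arena { pthread_mutex_t mutex; /* bins, top, ... */ };

static __thread struct arena *thread_arena;    /* per-thread cached arena */

static struct arena *my_arena_get2 (size_t size)
{
  (void) size;
  struct arena *a = calloc (1, sizeof *a);     /* stands in for _int_new_arena */
  pthread_mutex_init (&a->mutex, NULL);
  pthread_mutex_lock (&a->mutex);              /* returned locked, like glibc */
  thread_arena = a;                            /* cache it for this thread */
  return a;
}

static struct arena *my_arena_get (size_t size)
{
  struct arena *a = thread_arena;
  if (a != NULL)
    pthread_mutex_lock (&a->mutex);            /* fast path: reuse cached arena */
  else
    a = my_arena_get2 (size);                  /* slow path: pick or create one */
  return a;
}

int main (void)
{
  struct arena *a = my_arena_get (64);
  pthread_mutex_unlock (&a->mutex);
  return 0;
}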
/* Add the new arena to the global list.  */
a->next = main_arena.next;
/* FIXME: The barrier is an attempt to synchronize with read access
   in reused_arena, which does not acquire list_lock while
   traversing the list.  */
atomic_write_barrier ();
main_arena.next = a;
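This splice in _int_new_arena inserts the new arena right behind main_arena in a circular singly-linked list. A tiny standalone sketch of the same splice, with a simplified struct and invented names:

#include <stdio.h>

struct ar { const char *name; struct ar *next; };

int main (void)
{
  static struct ar main_ar = { "main", &main_ar };   /* like .next = &main_arena */
  struct ar a1 = { "arena1", NULL }, a2 = { "arena2", NULL };

  a1.next = main_ar.next;  main_ar.next = &a1;       /* same splice as above */
  a2.next = main_ar.next;  main_ar.next = &a2;

  struct ar *p = &main_ar;
  do { printf ("%s -> ", p->name); p = p->next; } while (p != &main_ar);
  printf ("(back to main)\n");                        /* main -> arena2 -> arena1 */
  return 0;
}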
/* FIXME: Access to next_to_use suffers from data races.  */
static mstate next_to_use;
if (next_to_use == NULL)
  next_to_use = &main_arena;
Then walk the arena list starting from next_to_use, one node at a time:
/* Iterate over all arenas (including those linked from
   free_list).  */
result = next_to_use;
do
  {
    if (!__libc_lock_trylock (result->mutex))
      goto out;

    /* FIXME: This is a data race, see _int_new_arena.  */
    result = result->next;
  }
while (result != next_to_use);
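A simplified model of this trylock loop in reused_arena, using an array of pthread mutexes in place of the arena list (illustrative only):

#include <pthread.h>
#include <stdio.h>

#define NARENAS 4

static pthread_mutex_t arenas[NARENAS] = {
  PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
  PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

int main (void)
{
  pthread_mutex_lock (&arenas[0]);        /* pretend arena 0 is busy */

  int start = 0, i = start, picked = -1;
  do
    {
      if (pthread_mutex_trylock (&arenas[i]) == 0)
        {
          picked = i;                     /* corresponds to "goto out" */
          break;
        }
      i = (i + 1) % NARENAS;              /* result = result->next */
    }
  while (i != start);

  printf ("picked arena %d\n", picked);   /* prints 1: arena 0 was locked */
  if (picked >= 0)
    pthread_mutex_unlock (&arenas[picked]);
  return 0;
}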
/* Attach the arena to the current thread.  */
{
  /* Update the arena thread attachment counters.  */
  mstate replaced_arena = thread_arena;
  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);

  /* We may have picked up an arena on the free list.  We need to
     preserve the invariant that no arena on the free list has a
     positive attached_threads counter (otherwise,
     arena_thread_freeres cannot use the counter to determine if the
     arena needs to be put on the free list).  We unconditionally
     remove the selected arena from the free list.  The caller of
     reused_arena checked the free list and observed it to be empty,
     so the list is very short.  */
  remove_from_free_list (result);

  ++result->attached_threads;

  __libc_lock_unlock (free_list_lock);
}
void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
         arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
      __libc_lock_unlock (free_list_lock);
    }
}
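A compact model of this bookkeeping, with a plain pthread mutex standing in for free_list_lock and an invented struct arena: the last detaching thread pushes the arena onto the free list.

#include <pthread.h>
#include <stddef.h>
#include <assert.h>

struct arena { size_t attached_threads; struct arena *next_free; };

static struct arena *free_list;
static pthread_mutex_t free_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void thread_detach (struct arena *a)
{
  if (a == NULL)
    return;
  pthread_mutex_lock (&free_list_lock);
  assert (a->attached_threads > 0);
  if (--a->attached_threads == 0)       /* last user: arena becomes reusable */
    {
      a->next_free = free_list;
      free_list = a;
    }
  pthread_mutex_unlock (&free_list_lock);
}

int main (void)
{
  struct arena a = { .attached_threads = 1 };
  thread_detach (&a);                   /* a is now on the free list */
  assert (free_list == &a);
  return 0;
}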
p = mem2chunk (mem);

if (chunk_is_mmapped (p))                       /* release mmapped memory. */
  {
    /* See if the dynamic brk/mmap threshold needs adjusting.
       Dumped fake mmapped chunks do not affect the threshold.  */
    if (!mp_.no_dyn_threshold
        && chunksize_nomask (p) > mp_.mmap_threshold
        && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
        && !DUMPED_MAIN_ARENA_CHUNK (p))
      {
        mp_.mmap_threshold = chunksize (p);
        mp_.trim_threshold = 2 * mp_.mmap_threshold;
        LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
                    mp_.mmap_threshold, mp_.trim_threshold);
      }
    munmap_chunk (p);
    return;
  }
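One related, user-visible knob: if a program sets M_MMAP_THRESHOLD itself via mallopt, glibc (in current versions, to my understanding) stops adjusting the threshold dynamically, so the branch above no longer fires. A small sketch:

#include <malloc.h>
#include <stdlib.h>

int main (void)
{
  /* Pin the mmap threshold at 1 MiB; sufficiently large requests that
     cannot be served from existing free chunks go to mmap, and freeing
     them no longer grows the threshold.  */
  mallopt (M_MMAP_THRESHOLD, 1024 * 1024);

  void *p = malloc (2 * 1024 * 1024);   /* expected to be mmapped */
  free (p);                             /* munmap'd; threshold stays fixed */
  return 0;
}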
If the memory was not mmapped but was obtained from a malloc_state (an arena):
ar_ptr = arena_for_chunk (p);
_int_free (ar_ptr, p, 0);
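Conceptually, arena_for_chunk looks at the NON_MAIN_ARENA bit: if it is clear, the chunk belongs to main_arena; otherwise the chunk lives in a sub-heap aligned to HEAP_MAX_SIZE, so masking the chunk address yields the heap_info header, which records the owning arena. A sketch under 64-bit assumptions; MY_HEAP_MAX_SIZE, my_arena_for_chunk, and struct my_heap_info are invented names:

#include <stddef.h>
#include <stdint.h>

#define MY_HEAP_MAX_SIZE  (64UL * 1024 * 1024)    /* HEAP_MAX_SIZE on 64-bit */
#define MY_NON_MAIN_ARENA 0x4                     /* the A bit in mchunk_size */

struct my_heap_info { void *ar_ptr; /* owning arena, then heap metadata */ };

static void *my_arena_for_chunk (void *chunk, size_t size_field,
                                 void *main_arena_ptr)
{
  if ((size_field & MY_NON_MAIN_ARENA) == 0)
    return main_arena_ptr;                        /* A bit clear: main arena */
  uintptr_t heap = (uintptr_t) chunk & ~(MY_HEAP_MAX_SIZE - 1);
  return ((struct my_heap_info *) heap)->ar_ptr;  /* A bit set: read heap_info */
}

int main (void)
{
  static int fake_main_arena;
  size_t size_field = 0x101;                      /* A clear, P set */
  return my_arena_for_chunk ((void *) 0, size_field, &fake_main_arena)
         == (void *) &fake_main_arena ? 0 : 1;
}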
Now we step into _int_free:
if ((unsigned long) (size) <= (unsigned long) (get_max_fast ()))
  {
    // ...
    unsigned int idx = fastbin_index (size);
    fb = &fastbin (av, idx);

    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
    mchunkptr old = *fb, old2;
    // ...
    p->fd = old2 = old;
    // ...
  }
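The effect of this LIFO linking can often be observed from user space: freed small chunks tend to come back in reverse order. The exact behaviour depends on the glibc version and on whether tcache sits in front of the fastbins, so treat this as illustrative only:

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *a = malloc (32), *b = malloc (32);
  printf ("a=%p b=%p\n", a, b);

  free (a);
  free (b);                       /* b is now at the head of the list */

  void *c = malloc (32);          /* typically returns b again (LIFO) */
  void *d = malloc (32);          /* then a */
  printf ("c=%p d=%p\n", c, d);

  free (c);
  free (d);
  return 0;
}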
if (!in_smallbin_range (nb))
  {
    /* skip scan if empty or largest chunk is too small */
    if ((victim = first (bin)) != bin
        && (unsigned long) chunksize_nomask (victim)
             >= (unsigned long) (nb))
      {
        victim = victim->bk_nextsize;
        while (((unsigned long) (size = chunksize (victim))
                < (unsigned long) (nb)))
          victim = victim->bk_nextsize;
        // ...
        if (remainder_size < MINSIZE)
          {
            // ...
          }
        /* Split */
        else
          {
            // ...
          }
        // ...
      }
  }
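The scan walks from the smallest chunk toward larger ones via bk_nextsize until it finds the first chunk that fits (best fit). A minimal model with an array standing in for the size-sorted list:

#include <stdio.h>

int main (void)
{
  /* chunk sizes present in one large bin, smallest to largest */
  unsigned long sizes[] = { 1040, 1104, 1232, 1456, 2064 };
  unsigned long nb = 1200;                  /* normalized request size */
  int n = sizeof sizes / sizeof sizes[0];

  int i = 0;
  while (i < n && sizes[i] < nb)            /* victim = victim->bk_nextsize */
    i++;

  if (i < n)
    printf ("best fit: %lu (remainder %lu)\n", sizes[i], sizes[i] - nb);
  else
    printf ("no chunk in this bin is large enough\n");
  return 0;
}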
victim = av->top;
size = chunksize (victim);

if (__glibc_unlikely (size > av->system_mem))
  malloc_printerr ("malloc(): corrupted top size");

if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
  {
    remainder_size = size - nb;
    remainder = chunk_at_offset (victim, nb);
    av->top = remainder;
    set_head (victim, nb | PREV_INUSE |
              (av != &main_arena ? NON_MAIN_ARENA : 0));
    set_head (remainder, remainder_size | PREV_INUSE);

    check_malloced_chunk (av, victim, nb);
    void *p = chunk2mem (victim);
    alloc_perturb (p, bytes);
    return p;
  }

/* When we are using atomic ops to free fast chunks we can get
   here for all block sizes.  */
else if (atomic_load_relaxed (&av->have_fastchunks))
  {
    malloc_consolidate (av);
    /* restore original bin index */
    if (in_smallbin_range (nb))
      idx = smallbin_index (nb);
    else
      idx = largebin_index (nb);
  }

/*
   Otherwise, relay to handle system-dependent cases
 */
else
  {
    void *p = sysmalloc (nb, av);
    if (p != NULL)
      alloc_perturb (p, bytes);
    return p;
  }
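A small model of the top-chunk decision: serve the request from the low end of top if enough remains for a new top chunk of at least MINSIZE, otherwise consolidate fastbins or fall through to sysmalloc (MY_MINSIZE here assumes the 64-bit value of 32 bytes):

#include <stdio.h>

#define MY_MINSIZE 32UL   /* MINSIZE on 64-bit glibc, an assumption here */

int main (void)
{
  unsigned long top_size = 0x21000;   /* bytes left in the top chunk */
  unsigned long nb = 0x110;           /* normalized request size */

  if (top_size >= nb + MY_MINSIZE)
    {
      unsigned long remainder = top_size - nb;
      printf ("serve %#lx from top, new top size %#lx\n", nb, remainder);
    }
  else
    printf ("top too small: consolidate fastbins or call sysmalloc\n");
  return 0;
}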