Tcl Source Code

Check-in [ce2cb0ea68]

Overview
Comment: small opts
SHA1: ce2cb0ea687bb24919cb74b4da8278d7a5596a71
User & Date: mig 2011-03-21 11:42:06
Context
2011-03-21 11:42  some cleanup re obj deletion  (check-in: ca6e8a9e5e, user: mig, tags: mig-alloc-reform)
2011-03-21 11:42  small opts  (check-in: ce2cb0ea68, user: mig, tags: mig-alloc-reform)
2011-03-20 11:42  merge trunk to feature branch  (check-in: 3560c1f392, user: mig, tags: mig-alloc-reform)
Changes

Changes to generic/tclAlloc.c.

Old version (lines 624-667):

    /*
     * Increment the requested size to include room for the Block structure.
     * Call malloc() directly if the required amount is greater than the
     * largest block, otherwise pop the smallest block large enough,
     * allocating more blocks if necessary.
     */

    blockPtr = NULL;
    size = reqSize + OFFSET;
#if RCHECK
    size++;
#endif
    if (size > MAXALLOC) {
	bucket = nBuckets;
	blockPtr = malloc(size);
#ifdef ZIPPY_STATS
	if (blockPtr != NULL) {
	    cachePtr->totalAssigned += reqSize;
	}
#endif
    } else {
	bucket = 0;
	while (bucketInfo[bucket].blockSize < size) {
	    bucket++;
	}
	if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
	    blockPtr = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
	    cachePtr->buckets[bucket].numFree--;
#ifdef ZIPPY_STATS
	    cachePtr->buckets[bucket].numRemoves++;
	    cachePtr->buckets[bucket].totalAssigned += reqSize;
#endif
	}
    }
    if (blockPtr == NULL) {
	return NULL;
    }
    return Block2Ptr(blockPtr, bucket, reqSize);
}

/*
 *----------------------------------------------------------------------
 *

New version (lines 624-667):

    /*
     * Increment the requested size to include room for the Block structure.
     * Call malloc() directly if the required amount is greater than the
     * largest block, otherwise pop the smallest block large enough,
     * allocating more blocks if necessary.
     */

    size = reqSize + OFFSET;
#if RCHECK
    size++;
#endif
    if (size > MAXALLOC) {
	bucket = nBuckets;
	blockPtr = malloc(size);
#ifdef ZIPPY_STATS
	if (blockPtr != NULL) {
	    cachePtr->totalAssigned += reqSize;
	}
#endif
    } else {
	blockPtr = NULL;
	bucket = 0;
	while (bucketInfo[bucket].blockSize < size) {
	    bucket++;
	}
	if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
	    blockPtr = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
	    cachePtr->buckets[bucket].numFree--;
#ifdef ZIPPY_STATS
	    cachePtr->buckets[bucket].numRemoves++;
	    cachePtr->buckets[bucket].totalAssigned += reqSize;
#endif
	}
	if (blockPtr == NULL) {
	    return NULL;
	}
    }
    return Block2Ptr(blockPtr, bucket, reqSize);
}

/*
 *----------------------------------------------------------------------
 *
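
The block comment in this routine describes the small-block path: round the request up to make room for the Block header, then pop the smallest bucket whose blocks are large enough, falling back to a direct malloc() above the largest bucket. A rough self-contained sketch of that size-to-bucket mapping, using made-up bucket sizes and names rather than Tcl's actual bucketInfo table, looks like this:

#include <stdio.h>
#include <stddef.h>

#define NBUCKETS 8                          /* hypothetical bucket count       */

static const size_t blockSize[NBUCKETS] = { /* hypothetical power-of-two sizes */
    16, 32, 64, 128, 256, 512, 1024, 2048
};
#define MAXALLOC blockSize[NBUCKETS - 1]

/*
 * Return the index of the smallest bucket that can hold "size" bytes, or
 * NBUCKETS as a sentinel meaning "too big, serve it with malloc() directly".
 */
static int
PickBucket(size_t size)
{
    int bucket;

    if (size > MAXALLOC) {
        return NBUCKETS;
    }
    bucket = 0;
    while (blockSize[bucket] < size) {
        bucket++;
    }
    return bucket;
}

int
main(void)
{
    size_t sizes[] = { 10, 100, 2048, 5000 };
    int i;

    for (i = 0; i < 4; i++) {
        printf("request %lu -> bucket %d\n", (unsigned long) sizes[i],
                PickBucket(sizes[i]));
    }
    return 0;
}

Compiled as-is, this prints the chosen bucket for a few request sizes; in the code above, the allocator then pops a free block from that bucket or calls GetBlocks() to refill it.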

Old version (lines 690-720):

	return;
    }

    if (allocator < aNONE) {
	return free((char *) ptr);
    }

    GETCACHE(cachePtr);

    /*
     * Get the block back from the user pointer and call system free directly
     * for large blocks. Otherwise, push the block back on the bucket and move
     * blocks to the shared cache if there are now too many free.
     */

    blockPtr = Ptr2Block(ptr);
    bucket = blockPtr->sourceBucket;
    if (bucket == nBuckets) {
#ifdef ZIPPY_STATS
	cachePtr->totalAssigned -= blockPtr->reqSize;
#endif
	free(blockPtr);
	return;
    }

#ifdef ZIPPY_STATS
    cachePtr->buckets[bucket].totalAssigned -= blockPtr->reqSize;
#endif
    blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
    cachePtr->buckets[bucket].firstPtr = blockPtr;
    cachePtr->buckets[bucket].numFree++;

New version (lines 690-726):

	return;
    }

    if (allocator < aNONE) {
	return free((char *) ptr);
    }

#ifdef ZIPPY_STATS
    GETCACHE(cachePtr);
#endif

    /*
     * Get the block back from the user pointer and call system free directly
     * for large blocks. Otherwise, push the block back on the bucket and move
     * blocks to the shared cache if there are now too many free.
     */

    blockPtr = Ptr2Block(ptr);
    bucket = blockPtr->sourceBucket;
    if (bucket == nBuckets) {
#ifdef ZIPPY_STATS
	cachePtr->totalAssigned -= blockPtr->reqSize;
#endif
	free(blockPtr);
	return;
    }

#ifndef ZIPPY_STATS
    GETCACHE(cachePtr);
#endif

#ifdef ZIPPY_STATS
    cachePtr->buckets[bucket].totalAssigned -= blockPtr->reqSize;
#endif
    blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
    cachePtr->buckets[bucket].firstPtr = blockPtr;
    cachePtr->buckets[bucket].numFree++;
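
The second hunk splits the GETCACHE(cachePtr) call across ZIPPY_STATS guards: with statistics compiled in, the cache is still fetched up front (its counters are updated on every path), but without them the fetch is deferred until after the large-block check, so freeing an oversized block goes straight to free() without a per-thread cache lookup. A minimal self-contained sketch of that pattern, using hypothetical names rather than Tcl's actual macros and types, is:

#include <stdio.h>
#include <stdlib.h>

#define STATS 0                        /* flip to 1 to mimic a ZIPPY_STATS build */

typedef struct Cache {
    long totalFreed;                   /* stand-in for the per-bucket counters   */
} Cache;

static Cache threadCache;

/* Stand-in for GETCACHE(cachePtr): pretend this lookup has a real cost. */
static Cache *
GetCache(void)
{
    puts("cache lookup");
    return &threadCache;
}

static void
DemoFree(void *ptr, int isLargeBlock)
{
    Cache *cachePtr = NULL;

#if STATS
    cachePtr = GetCache();             /* stats are kept on every path           */
#endif

    if (isLargeBlock) {
#if STATS
        cachePtr->totalFreed++;
#endif
        free(ptr);                     /* oversized blocks go straight to free() */
        return;                        /* ...without ever touching the cache     */
    }

#if !STATS
    cachePtr = GetCache();             /* only the bucket path needs the cache   */
#endif
    /* ... here the real allocator would push ptr onto cachePtr's bucket ... */
    (void) cachePtr;
    free(ptr);
}

int
main(void)
{
    DemoFree(malloc(64), 0);           /* small block: one cache lookup          */
    DemoFree(malloc(1 << 20), 1);      /* large block: no cache lookup           */
    return 0;
}

With STATS set to 0, only the small-block call prints "cache lookup"; with STATS set to 1 both calls do, mirroring the two preprocessed variants of the new code above.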