@@ -106,13 +106,13 @@ static int sgCheckInternalOffsetRows = 0;
106106int gInAlloc = false ;
107107
108108// This is recalculated from the other parameters
109- static size_t sWorkingMemorySize = 10 *1024 *1024 ;
109+ static size_t sWorkingMemorySize = 50 *1024 *1024 ;
110110
111111#ifdef HXCPP_GC_MOVING
112112// Just not sure what this should be
113- static size_t sgMaximumFreeSpace = 1024 *1024 *1024 ;
113+ static size_t sgMaximumFreeSpace = 512 *1024 *1024 ;
114114#else
115- static size_t sgMaximumFreeSpace = 1024 *1024 *1024 ;
115+ static size_t sgMaximumFreeSpace = 512 *1024 *1024 ;
116116#endif
117117
118118
@@ -722,7 +722,7 @@ struct HoleRange
722722hx::QuickVec<struct BlockDataInfo *> *gBlockInfo = 0 ;
723723static int gBlockInfoEmptySlots = 0 ;
724724
725- #define FRAG_THRESH 14
725+ #define FRAG_THRESH 20
726726
727727#define ZEROED_NOT 0
728728#define ZEROED_THREAD 1
@@ -3119,11 +3119,11 @@ void VerifyStackRead(int *inBottom, int *inTop)
31193119
31203120// TODO - work out best size based on cache size?
31213121#ifdef HXCPP_GC_BIG_BLOCKS
3122- static int sMinZeroQueueSize = 4 ;
3123- static int sMaxZeroQueueSize = 16 ;
3122+ static int sMinZeroQueueSize = 4 * 2 ;
3123+ static int sMaxZeroQueueSize = 16 * 2 ;
31243124#else
3125- static int sMinZeroQueueSize = 8 ;
3126- static int sMaxZeroQueueSize = 32 ;
3125+ static int sMinZeroQueueSize = 8 * 2 ;
3126+ static int sMaxZeroQueueSize = 32 * 2 ;
31273127#endif
31283128
31293129#define BLOCK_OFSIZE_COUNT 12
@@ -3207,9 +3207,9 @@ class GlobalAllocator
32073207
32083208 void FreeLarge (void *inLarge)
32093209 {
3210- #ifdef HXCPP_TELEMETRY
3210+ #ifdef HXCPP_TELEMETRY
32113211 __hxt_gc_free_large (inLarge);
3212- #endif
3212+ #endif
32133213
32143214 ((unsigned char *)inLarge)[HX_ENDIAN_MARK_ID_BYTE] = 0 ;
32153215 // AllocLarge will not lock this list unless it decides there is a suitable
@@ -3241,7 +3241,8 @@ class GlobalAllocator
32413241
32423242 // Should we force a collect ? - the 'large' data are not considered when allocating objects
32433243 // from the blocks, and can 'pile up' between small object allocations
3244- if ((inSize+mLargeAllocated > mLargeAllocForceRefresh ) && sgInternalEnable)
3244+ size_t newThreshold = mLargeAllocForceRefresh * 1.5f ;
3245+ if ((inSize+mLargeAllocated > newThreshold) && sgInternalEnable)
32453246 {
32463247 #ifdef SHOW_MEM_EVENTS
32473248 // GCLOG("Large alloc causing collection");
@@ -5186,11 +5187,11 @@ class GlobalAllocator
51865187 }
51875188
51885189
5189- bool isFragged = stats.fragScore > mAllBlocks .size ()*FRAG_THRESH;
5190+ bool isFragged = stats.fragScore > mAllBlocks .size ()*FRAG_THRESH* 1.5 ;
51905191 if (doRelease || isFragged || hx::gAlwaysMove )
51915192 {
5192- if (isFragged && sgTimeToNextTableUpdate>3 )
5193- sgTimeToNextTableUpdate = 3 ;
5193+ if (isFragged && sgTimeToNextTableUpdate>1 )
5194+ sgTimeToNextTableUpdate = 1 ;
51945195 calcMoveOrder ( );
51955196
51955196 // Borrow some blocks to ensure space to defrag into
@@ -5306,7 +5307,7 @@ class GlobalAllocator
53065307 double filled_ratio = (double )mRowsInUse /(double )(mAllBlocksCount *IMMIX_USEFUL_LINES);
53075308 double after_gen = filled_ratio + (1.0 -filled_ratio)*mGenerationalRetainEstimate ;
53085309
5309- if (after_gen<0.75 )
5310+ if (after_gen<0.85 )
53105311 {
53115312 sGcMode = gcmGenerational;
53125313 }
@@ -5315,7 +5316,8 @@ class GlobalAllocator
53155316 sGcMode = gcmFull;
53165317 // What was I thinking here? This breaks #851
53175318 // gByteMarkID |= 0x30;
5318- }
5319+ sgTimeToNextTableUpdate = 10 ;
5320+ }
53195321
53205322 #ifdef SHOW_MEM_EVENTS
53215323 GCLOG (" filled=%.2f%% + estimate = %.2f%% = %.2f%% -> %s\n " ,
0 commit comments