Changeset 98937 in webkit
- Timestamp: Oct 31, 2011, 11:43:37 PM
- Location: trunk/Source
- Files: 20 edited
- JavaScriptCore/ChangeLog (modified) (1 diff)
- JavaScriptCore/JavaScriptCore.exp (modified) (3 diffs)
- JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def (modified) (1 diff)
- JavaScriptCore/heap/Heap.cpp (modified) (8 diffs)
- JavaScriptCore/heap/Heap.h (modified) (3 diffs)
- JavaScriptCore/heap/MarkStack.cpp (modified) (4 diffs)
- JavaScriptCore/heap/MarkStack.h (modified) (11 diffs)
- JavaScriptCore/heap/MarkedBlock.h (modified) (2 diffs)
- JavaScriptCore/heap/SlotVisitor.h (modified) (1 diff)
- JavaScriptCore/heap/WeakReferenceHarvester.h (modified) (2 diffs)
- JavaScriptCore/runtime/Heuristics.cpp (modified) (3 diffs)
- JavaScriptCore/runtime/Heuristics.h (modified) (1 diff)
- JavaScriptCore/wtf/Atomics.h (modified) (2 diffs)
- JavaScriptCore/wtf/Bitmap.h (modified) (10 diffs)
- JavaScriptCore/wtf/MainThread.h (modified) (2 diffs)
- JavaScriptCore/wtf/Platform.h (modified) (1 diff)
- JavaScriptCore/wtf/ThreadSpecific.h (modified) (2 diffs)
- JavaScriptCore/wtf/mac/MainThreadMac.mm (modified) (5 diffs)
- WebCore/ChangeLog (modified) (1 diff)
- WebCore/platform/TreeShared.h (modified) (1 diff)
trunk/Source/JavaScriptCore/ChangeLog
Diff from r98932 (1 diff): a new ChangeLog entry of 116 lines is added at the top of the file, ahead of the existing 2011-10-31 Mark Hahnenberg <mhahnenberg@apple.com> entry (the entry text itself is not reproduced in this view).
trunk/Source/JavaScriptCore/JavaScriptCore.exp
Diff from r98932 (3 diffs): three hunks insert a handful of new exported symbols into the list, alongside the existing __ZN3JSC10HandleHeap4growEv, __ZN3JSC9MarkStack8validateEPNS_6JSCellE, and __ZN3WTF23callOnMainThreadAndWaitEPFvPvES0_ entries.
trunk/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
Diff from r98932 (1 diff): one hunk updates the decorated name exported for MarkStackArray::expand (previously ?expand@MarkStackArray@JSC@@QAEXXZ), matching its changed declaration in MarkStack.h.
trunk/Source/JavaScriptCore/heap/Heap.cpp
Diff from r98365 (8 diffs): the Heap constructor gains a shared marking-state member, initialized as m_sharedData(globalData), and the slot visitor is now constructed as m_slotVisitor(m_sharedData, globalData->jsArrayVPtr, globalData->jsFinalObjectVPtr, globalData->jsStringVPtr). In root marking, each phase (VisitMachineRoots, VisitRegisterFileRoots, VisitProtectedObjects, VisitTempSortVectors, MarkingArgumentBuffers, MarkingException, VisitStrongHandles, HandleStack, TraceCodeBlocks) now ends with visitor.donateAndDrain() where it previously called visitor.drain(); the ENABLE(GGC) dirty-cell loop is re-indented; and a new Convergence phase calls visitor.drainFromShared(SlotVisitor::MasterDrain) under ENABLE(PARALLEL_GC). The weak-handle fixpoint loop wraps its draining in a ParallelModeEnabler and, when parallel GC is enabled, also drains from the shared mark stack before re-checking the opaque-root count. The remaining hunks are whitespace.
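For context, the donate-and-drain pattern that replaces the plain drain calls works roughly as follows. This is a self-contained illustrative sketch using standard C++ containers, not the changeset's SlotVisitor code; the cellsToKeep value is a made-up placeholder, and the real code only donates every minimumNumberOfScansBetweenRebalance cells rather than on every iteration.

    // Sketch: drain a thread-local mark stack while periodically pushing
    // surplus work onto a shared stack that idle marking threads can steal.
    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Cell { Cell* child = nullptr; bool marked = false; };

    struct SharedWork {
        std::mutex lock;
        std::vector<Cell*> stack; // work any marking thread may steal
    };

    class Visitor {
    public:
        explicit Visitor(SharedWork& shared) : m_shared(shared) { }

        void append(Cell* cell)
        {
            if (cell && !cell->marked) {
                cell->marked = true;
                m_stack.push_back(cell);
            }
        }

        void donateAndDrain()
        {
            while (!m_stack.empty()) {
                Cell* cell = m_stack.back();
                m_stack.pop_back();
                append(cell->child); // stand-in for visiting the cell's children
                donate();            // share surplus work with other markers
            }
        }

    private:
        void donate()
        {
            const std::size_t cellsToKeep = 4; // placeholder for minimumNumberOfCellsToKeep
            if (m_stack.size() <= cellsToKeep)
                return; // not enough surplus to be worth sharing
            std::lock_guard<std::mutex> guard(m_shared.lock);
            while (m_stack.size() > cellsToKeep) {
                m_shared.stack.push_back(m_stack.back());
                m_stack.pop_back();
            }
        }

        SharedWork& m_shared;
        std::vector<Cell*> m_stack;
    };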
trunk/Source/JavaScriptCore/heap/Heap.h
Diff from r96760 (3 diffs): three small hunks add new declarations — one near the friend declarations, and new members immediately before and after the existing SlotVisitor m_slotVisitor field. Judging from the constructor change in Heap.cpp, this is where the MarkStackThreadSharedData m_sharedData member is declared.
trunk/Source/JavaScriptCore/heap/MarkStack.cpp
Diff from r97827 (4 diffs): the old page-sized contiguous MarkStackArray buffer (expand()/shrinkAllocation() over a single allocation) is replaced by a segmented stack. A new MarkStackSegmentAllocator keeps a mutex-protected free list of fixed-size segments obtained from OSAllocator::reserveAndCommit(Heuristics::gcMarkStackSegmentSize), with allocate(), release(), and shrinkReserve(). MarkStackArray now chains MarkStackSegments: expand() pushes a fresh segment, refill() pops an exhausted one, donateSomeCellsTo() hands all full segments plus surplus cells to another stack (keeping at least Heuristics::minimumNumberOfCellsToKeep cells, or half when the stack is large, and bailing out if the receiver already has Heuristics::maximumNumberOfSharedSegments segments), and stealSomeCellsFrom() takes a whole segment when one is available or otherwise drains 1/Nth of the shared array, where N is Heuristics::numberOfGCMarkers, but at least minimumNumberOfCellsToKeep cells.

A new MarkStackThreadSharedData carries the per-collection shared state: the segment allocator, the shared mark stack, the marking threads (Heuristics::numberOfGCMarkers - 1 of them, named "JavaScriptCore::Marking", created in the constructor and joined in the destructor after broadcasting m_parallelMarkersShouldExit), the marking lock and condition, and the shared opaque-root set. Each worker runs markingThreadMain(), which registers itself via WTF::registerGCThread(), builds its own SlotVisitor, and calls drainFromShared(SlotVisitor::SlaveDrain). SlotVisitor::drain() now refills from segments and, when Heuristics::numberOfGCMarkers > 1, donates work via donateKnownParallel() every Heuristics::minimumNumberOfScansBetweenRebalance cells visited. SlotVisitor::drainFromShared() implements the master/slave convergence protocol: each marker decrements the active-marker count, waits on the marking condition until shared work appears (or, for slaves, until m_parallelMarkersShouldExit), detects termination when no markers are active and the shared stack is empty, then steals cells and drains again. MarkStack::mergeOpaqueRoots() folds a visitor's local opaque-root set into the shared one under m_opaqueRootsLock, and the reset() functions now assert the stacks are empty, with the opaque-root set cleared or checked depending on ENABLE(PARALLEL_GC).
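The master/slave drain loop described above is essentially condition-variable-based termination detection. The following self-contained sketch expresses the same protocol with std::mutex and std::condition_variable instead of WTF::Mutex and ThreadCondition; it is illustrative, not the changeset's code, and uses a plain deque of integers as a stand-in for the shared mark stack.

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct SharedMarkingState {
        std::mutex lock;
        std::condition_variable condition;
        std::deque<int> sharedWork;       // stand-in for the shared mark stack
        unsigned activeMarkers = 0;
        bool markersShouldExit = false;
    };

    enum DrainMode { MasterDrain, SlaveDrain };

    void drainFromShared(SharedMarkingState& shared, DrainMode mode)
    {
        std::unique_lock<std::mutex> locker(shared.lock);
        ++shared.activeMarkers;
        while (true) {
            --shared.activeMarkers;
            if (mode == MasterDrain) {
                // The master returns once every marker is idle and no work remains.
                while (true) {
                    if (!shared.activeMarkers && shared.sharedWork.empty())
                        return;
                    if (!shared.sharedWork.empty())
                        break;
                    shared.condition.wait(locker);
                }
            } else {
                // A slave signals possible termination, then waits for work or exit.
                if (!shared.activeMarkers && shared.sharedWork.empty())
                    shared.condition.notify_all();
                while (shared.sharedWork.empty() && !shared.markersShouldExit)
                    shared.condition.wait(locker);
                if (shared.markersShouldExit)
                    return;
            }
            int work = shared.sharedWork.front(); // steal some work
            shared.sharedWork.pop_front();
            ++shared.activeMarkers;
            locker.unlock();
            (void)work; // process the stolen work here (it may donate more back)
            locker.lock();
        }
    }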
trunk/Source/JavaScriptCore/heap/MarkStack.h
Diff from r97642 (11 diffs): declares MarkStackSegment (an m_previous link, a debug-only m_top, the cell payload behind data(), and capacityFromSize() to convert a byte size into a cell count) and MarkStackSegmentAllocator. MarkStackArray is rewritten around segments: it stores m_allocator, m_segmentCapacity, m_top, and m_numberOfPreviousSegments, with postIncTop()/preDecTop() keeping the cached top in sync with the debug-only per-segment top, setTopForFullSegment()/setTopForEmptySegment() after segment changes, validatePrevious() checking the segment count in debug builds, and new operations canRemoveLast(), canDonateSomeCells(), donateSomeCellsTo(), stealSomeCellsFrom(), refill(), and size(); append() and removeLast() index into the top segment, and isEmpty() also accounts for full previous segments.

MarkStackThreadSharedData is declared here with the JSGlobalData pointer, segment allocator, marking threads, marking lock and condition, shared mark stack, active-marker count, exit flag, opaque-roots lock and set, and the weak-reference-harvester list (which moves here out of MarkStack). MarkStack's constructor now takes a MarkStackThreadSharedData& and builds its stack from the shared segment allocator; the class gains m_isInParallelMode and an m_shared reference. addOpaqueRoot() now returns void: with a single marker it adds directly to the shared set, otherwise it adds to a local set that is merged once Heuristics::opaqueRootMergeThreshold is reached (mergeOpaqueRoots(), mergeOpaqueRootsIfNecessary(), and mergeOpaqueRootsIfProfitable() are declared alongside), and containsOpaqueRoot() and opaqueRootCount() gain a parallel-GC path as well. A small ParallelModeEnabler RAII class sets and clears m_isInParallelMode around parallel drains.
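The point of the segmented layout is that donating or stealing whole segments is an O(1) pointer swap instead of a memcpy. Here is a minimal standalone sketch of such a stack; segments come straight from malloc rather than a pooled MarkStackSegmentAllocator, and the destructor and assertion machinery are omitted for brevity.

    #include <cstddef>
    #include <cstdlib>

    struct Segment {
        Segment* previous;
        static const std::size_t capacity = 1024; // cells per segment
        const void* data[capacity];
    };

    class SegmentedStack {
    public:
        SegmentedStack() : m_top(0), m_topSegment(newSegment(nullptr)) { }

        void append(const void* cell)
        {
            if (m_top == Segment::capacity) { // like expand(): chain a fresh segment
                m_topSegment = newSegment(m_topSegment);
                m_top = 0;
            }
            m_topSegment->data[m_top++] = cell;
        }

        bool canRemoveLast() const { return m_top != 0; }

        const void* removeLast() { return m_topSegment->data[--m_top]; }

        // Like refill(): discard an exhausted top segment and resume popping
        // from the full segment underneath it.
        bool refill()
        {
            if (m_top)
                return true;
            Segment* previous = m_topSegment->previous;
            if (!previous)
                return false;
            std::free(m_topSegment);
            m_topSegment = previous;
            m_top = Segment::capacity;
            return true;
        }

    private:
        static Segment* newSegment(Segment* previous)
        {
            Segment* segment = static_cast<Segment*>(std::malloc(sizeof(Segment)));
            segment->previous = previous;
            return segment;
        }

        std::size_t m_top;      // number of cells used in the top segment
        Segment* m_topSegment;  // every segment below the top one is full
    };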
trunk/Source/JavaScriptCore/heap/MarkedBlock.h
Diff from r97203 (2 diffs): the per-block mark bitmap becomes WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> when ENABLE(PARALLEL_GC) is set, and WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> otherwise; MarkedBlock::testAndSetMarked() switches to the bitmap's concurrent test-and-set so that parallel markers can race on mark bits safely.
trunk/Source/JavaScriptCore/heap/SlotVisitor.h
Diff from r97642 (1 diff): SlotVisitor's constructor now takes the MarkStackThreadSharedData& that it forwards to MarkStack, and the class grows the parallel-marking entry points used by Heap.cpp and MarkStack.cpp — donateAndDrain(), drainFromShared() with its MasterDrain/SlaveDrain modes, and internal donation helpers such as donateKnownParallel() — alongside the existing drain() and harvestWeakReferences().
trunk/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
Diff from r95901 (2 diffs): two small hunks add one forward declaration next to the existing class MarkStack;/class SlotVisitor; declarations and one friend declaration next to friend class MarkStack;, presumably for the new shared marking state, which now owns the weak-reference-harvester list.
trunk/Source/JavaScriptCore/runtime/Heuristics.cpp
Diff from r98912 (3 diffs): adds includes, defines roughly eight new heuristic variables after desiredProfileFullnessRate, and initializes them with SET(...) defaults after SET(desiredProfileFullnessRate, 0.35). The knobs referenced elsewhere in this changeset — numberOfGCMarkers, gcMarkStackSegmentSize, minimumNumberOfCellsToKeep, maximumNumberOfSharedSegments, minimumNumberOfScansBetweenRebalance, and opaqueRootMergeThreshold — are presumably defined here; their default values are not shown above.
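Heuristics.cpp already follows a pattern where each knob has a compiled-in default that an environment variable can override when RUN_TIME_HEURISTICS is enabled. The sketch below illustrates that pattern in self-contained form; the knob's environment-variable name and default value are placeholders, not values from this changeset.

    #include <cstdio>
    #include <cstdlib>

    // numberOfGCMarkers is one of the new knobs; the variable name is real,
    // but the environment-variable name and default here are made up.
    static unsigned numberOfGCMarkers;

    // Use the compiled-in default unless the named environment variable is set
    // to a parseable unsigned value.
    static void setHeuristic(unsigned& variable, const char* environmentName, unsigned defaultValue)
    {
        variable = defaultValue;
        if (const char* stringValue = std::getenv(environmentName)) {
            unsigned value;
            if (std::sscanf(stringValue, "%u", &value) == 1)
                variable = value;
            else
                std::fprintf(stderr, "Warning: ignoring bad value for %s\n", environmentName);
        }
    }

    static void initializeGCMarkingHeuristics()
    {
        setHeuristic(numberOfGCMarkers, "JSC_numberOfGCMarkers", 4); // 4 is illustrative
    }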
trunk/Source/JavaScriptCore/runtime/Heuristics.h
Diff from r98214 (1 diff): adds a block of extern declarations for the new GC marking heuristics immediately after extern double desiredProfileFullnessRate;.
trunk/Source/JavaScriptCore/wtf/Atomics.h
Diff from r97557 (2 diffs): adds one line near the #include "Platform.h" block and a roughly 25-line block of new atomic operations before the namespace closes; this is where the weakCompareAndSwap() primitive used by the atomic Bitmap below is introduced (its per-platform implementation is not shown above).
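As a rough idea of what such a primitive looks like, here is a hypothetical minimal weakCompareAndSwap built on the GCC/Clang builtin. The real Atomics.h change is not visible above and will have other platform paths (for example Windows interlocked operations); this is only the GCC-style case.

    // Sketch, not the changeset's code: atomically replace *location with
    // newValue if it still equals expected, returning whether the swap happened.
    inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, unsigned newValue)
    {
    #if defined(__GNUC__)
        return __sync_bool_compare_and_swap(location, expected, newValue);
    #else
    #error "Provide a compare-and-swap for this toolchain."
    #endif
    }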
trunk/Source/JavaScriptCore/wtf/Bitmap.h
Diff from r95901 (10 diffs): Bitmap gains a second template parameter, BitmapAtomicMode atomicMode = BitmapNotAtomic, together with an enum documenting the two modes (BitmapNotAtomic makes concurrentTestAndSet behave just like testAndSet; BitmapAtomic makes it use compare-and-swap so it is atomic even when used concurrently). Two new members, concurrentTestAndSet(size_t) and concurrentTestAndClear(size_t), fall back to testAndSet()/testAndClear() in the non-atomic mode and otherwise loop on weakCompareAndSwap(wordPtr, oldValue, oldValue | mask) (respectively oldValue & ~mask), returning early once the bit is observed already set (or already clear). The remaining hunks update the out-of-line member definitions for the new template signature without changing their bodies.
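The compare-and-swap loop in concurrentTestAndSet is the heart of lock-free marking: a thread only claims the mark bit if it observes the bit clear and atomically swaps in the updated word. The same loop expressed on a single word with std::atomic (illustrative, not WTF's Bitmap):

    #include <atomic>
    #include <cstdint>

    // Returns the previous state of the bit, exactly like testAndSet, but is
    // safe to call from multiple threads on the same word.
    inline bool concurrentTestAndSet(std::atomic<std::uint32_t>& word, unsigned bit)
    {
        const std::uint32_t mask = std::uint32_t(1) << bit;
        std::uint32_t oldValue = word.load(std::memory_order_relaxed);
        do {
            if (oldValue & mask)
                return true; // another thread already set this bit
        } while (!word.compare_exchange_weak(oldValue, oldValue | mask,
                                             std::memory_order_relaxed));
        return false; // we won the race and set the bit
    }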
trunk/Source/JavaScriptCore/wtf/MainThread.h
Diff from r97073 (2 diffs): adds a block of new declarations right after bool isMainThread(); and one matching using-declaration after using WTF::isMainThread;. Presumably these are the GC-thread hooks, including WTF::registerGCThread(), which the new marking threads call in MarkStack.cpp, and a main-thread check that also accepts registered GC threads.
trunk/Source/JavaScriptCore/wtf/Platform.h
Diff from r98629 (1 diff): inserts an eight-line block just before the ENABLE_GC_VALIDATION defaults; given the rest of the patch, this is presumably where the default setting for ENABLE(PARALLEL_GC) is defined.
trunk/Source/JavaScriptCore/wtf/ThreadSpecific.h
Diff from r94890 (2 diffs): adds one new member declaration to ThreadSpecific<T>'s public section right after the constructor, plus its out-of-line definition inserted just before operator T*().
trunk/Source/JavaScriptCore/wtf/mac/MainThreadMac.mm
Diff from r95448 (5 diffs): adds an import near the top of the file, new static state after the existing mainThreadPthread/mainThreadNSThread globals, extra setup in initializeMainThreadPlatform() and the corresponding teardown, and a roughly 27-line block of new functions at the end of the WTF namespace — presumably the Mac implementations of the GC-thread hooks declared in MainThread.h.
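The added functions are not legible in this capture, but the registerGCThread() call made by the marking threads suggests a per-thread flag along these lines. This is a hypothetical sketch with assumed names (isMainThreadOrGCThread in particular), using C++11 thread_local instead of WTF::ThreadSpecific for brevity; it is not the changeset's code.

    #include <pthread.h>

    static pthread_t mainThread;
    static thread_local bool currentThreadIsGCThread; // set once per marker thread

    void initializeMainThreadPlatform() { mainThread = pthread_self(); }

    // Called by each GC marking thread when it starts up.
    void registerGCThread() { currentThreadIsGCThread = true; }

    bool isMainThread() { return pthread_equal(pthread_self(), mainThread) != 0; }

    // Assertions that used to require the main thread can accept marker threads too.
    bool isMainThreadOrGCThread() { return isMainThread() || currentThreadIsGCThread; }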
trunk/Source/WebCore/ChangeLog
Diff from r98936 (1 diff): a new ChangeLog entry of 15 lines is added at the top of the file, ahead of the existing 2011-10-31 Andy Estes <aestes@apple.com> entry (the entry text itself is not reproduced in this view).
trunk/Source/WebCore/platform/TreeShared.h
Diff from r97137 (1 diff): the assertion at the top of parent() — ASSERT(isMainThread()) in the old code — is updated, most likely to the GC-aware main-thread check introduced in WTF, so that it also holds when a GC marking thread touches the node.