Differences between the original file and this patch
- a/Source/JavaScriptCore/ChangeLog +63 lines
Lines 1-5 a/Source/JavaScriptCore/ChangeLog_sec1
1
2015-06-30  Basile Clement  <basile_clement@apple.com>
1
2015-06-30  Basile Clement  <basile_clement@apple.com>
2
2
3
        Object cycles should not prevent allocation elimination/sinking
4
        https://bugs.webkit.org/show_bug.cgi?id=143073
5
6
        Reviewed by NOBODY (OOPS!).
7
8
        This patch introduces a new allocation sinking phase that is able to
9
        sink cycles, in DFGAllocationCycleSinkingPhase.cpp. This phase
10
        supersedes the old allocation sinking phase in
11
        DFGObjectAllocationSinkingPhase.cpp, as that previous phase was never
12
        able to sink allocation cycles while the new phase sometimes can; see
13
        DFGAllocationCycleSinkingPhase.cpp for details.
14
15
        For now, the new sinking phase is kept behind a
16
        JSC_enableAllocationCycleSinking flag that reverts to the old sinking
17
        phase when false (i.e., by default). This also removes the old
18
        JSC_enableObjectAllocationSinking flag. run-javascriptcore-tests
19
        defaults to using the new sinking phase.
20
21
        * CMakeLists.txt:
22
        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
23
        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
24
        * JavaScriptCore.xcodeproj/project.pbxproj:
25
        * dfg/DFGAllocationCycleSinkingPhase.cpp: Added.
26
        (JSC::DFG::performAllocationCycleSinking):
27
        * dfg/DFGAllocationCycleSinkingPhase.h: Added.
28
        * dfg/DFGGraph.h:
29
        (JSC::DFG::Graph::addStructureSet): Allow empty structure sets
30
        * dfg/DFGLazyNode.cpp:
31
        (JSC::DFG::LazyNode::dump): Prettier dump
32
        * dfg/DFGNode.h:
33
        (JSC::DFG::Node::cellOperand): Move to opInfo for MaterializeCreateActivation
34
        (JSC::DFG::Node::hasStructureSet): Add MaterializeNewObject
35
        (JSC::DFG::Node::objectMaterializationData): Move to opInfo2
36
        * dfg/DFGOSRAvailabilityAnalysisPhase.cpp: Remove unused header
37
        * dfg/DFGObjectAllocationSinkingPhase.cpp: MaterializeNewObject/MaterializeCreateActivation changed
38
        (JSC::DFG::ObjectAllocationSinkingPhase::run):
39
        (JSC::DFG::ObjectAllocationSinkingPhase::lowerNonReadingOperationsOnPhantomAllocations):
40
        (JSC::DFG::ObjectAllocationSinkingPhase::fillStructureSets):
41
        (JSC::DFG::ObjectAllocationSinkingPhase::createMaterialize):
42
        (JSC::DFG::ObjectAllocationSinkingPhase::populateMaterialize):
43
        * dfg/DFGPlan.cpp:
44
        (JSC::DFG::Plan::compileInThreadImpl):
45
        * dfg/DFGPromotedHeapLocation.h: Add a hash and a helper function to PromotedLocationDescriptor
46
        (JSC::DFG::PromotedLocationDescriptor::PromotedLocationDescriptor):
47
        (JSC::DFG::PromotedLocationDescriptor::operator bool):
48
        (JSC::DFG::PromotedLocationDescriptorHash::hash):
49
        (JSC::DFG::PromotedLocationDescriptorHash::equal):
50
        (JSC::DFG::PromotedLocationDescriptor::neededForMaterialization):
51
        * dfg/DFGValidate.cpp:
52
        (JSC::DFG::Validate::validateSSA): Assert that most nodes never see a phantom allocation
53
        * ftl/FTLLowerDFGToLLVM.cpp: 
54
        (JSC::FTL::DFG::LowerDFGToLLVM::compileMaterializeNewObject): Use the new structureSet() operand
55
        (JSC::FTL::DFG::LowerDFGToLLVM::compileMaterializeCreateActivation): Node has a new child
56
        * ftl/FTLOSRExitCompiler.cpp: Handle materialization cycles
57
        (JSC::FTL::compileStub):
58
        * ftl/FTLOperations.cpp: Handle materialization cycles
59
        (JSC::FTL::operationPopulateObjectInOSR):
60
        (JSC::FTL::operationMaterializeObjectInOSR):
61
        * ftl/FTLOperations.h: Handle materialization cycles
62
        * runtime/Options.h: Remove old JSC_enableObjectAllocationSinking and add JSC_enableAllocationCycleSinking
63
64
2015-06-30  Basile Clement  <basile_clement@apple.com>
65
3
        Allow object allocation sinking through GetScope, GetExecutable and SkipScope nodes
66
        Allow object allocation sinking through GetScope, GetExecutable and SkipScope nodes
4
        https://bugs.webkit.org/show_bug.cgi?id=146431
67
        https://bugs.webkit.org/show_bug.cgi?id=146431
5
68
- a/Source/JavaScriptCore/CMakeLists.txt +1 lines
Lines 126-131 set(JavaScriptCore_SOURCES a/Source/JavaScriptCore/CMakeLists.txt_sec1
126
126
127
    dfg/DFGAbstractHeap.cpp
127
    dfg/DFGAbstractHeap.cpp
128
    dfg/DFGAbstractValue.cpp
128
    dfg/DFGAbstractValue.cpp
129
    dfg/DFGAllocationCycleSinkingPhase.cpp
129
    dfg/DFGArgumentsEliminationPhase.cpp
130
    dfg/DFGArgumentsEliminationPhase.cpp
130
    dfg/DFGArgumentsUtilities.cpp
131
    dfg/DFGArgumentsUtilities.cpp
131
    dfg/DFGArithMode.cpp
132
    dfg/DFGArithMode.cpp
- a/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj +2 lines
Lines 364-369 a/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj_sec1
364
    <ClCompile Include="..\debugger\DebuggerScope.cpp" />
364
    <ClCompile Include="..\debugger\DebuggerScope.cpp" />
365
    <ClCompile Include="..\dfg\DFGAbstractHeap.cpp" />
365
    <ClCompile Include="..\dfg\DFGAbstractHeap.cpp" />
366
    <ClCompile Include="..\dfg\DFGAbstractValue.cpp" />
366
    <ClCompile Include="..\dfg\DFGAbstractValue.cpp" />
367
    <ClCompile Include="..\dfg\DFGAllocationCycleSinkingPhase.cpp" />
367
    <ClCompile Include="..\dfg\DFGArgumentsEliminationPhase.cpp" />
368
    <ClCompile Include="..\dfg\DFGArgumentsEliminationPhase.cpp" />
368
    <ClCompile Include="..\dfg\DFGArgumentsUtilities.cpp" />
369
    <ClCompile Include="..\dfg\DFGArgumentsUtilities.cpp" />
369
    <ClCompile Include="..\dfg\DFGArithMode.cpp" />
370
    <ClCompile Include="..\dfg\DFGArithMode.cpp" />
Lines 1037-1042 a/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj_sec2
1037
    <ClInclude Include="..\dfg\DFGAbstractValue.h" />
1038
    <ClInclude Include="..\dfg\DFGAbstractValue.h" />
1038
    <ClInclude Include="..\dfg\DFGAdjacencyList.h" />
1039
    <ClInclude Include="..\dfg\DFGAdjacencyList.h" />
1039
    <ClInclude Include="..\dfg\DFGAllocator.h" />
1040
    <ClInclude Include="..\dfg\DFGAllocator.h" />
1041
    <ClInclude Include="..\dfg\DFGAllocationCycleSinkingPhase.h" />
1040
    <ClInclude Include="..\dfg\DFGAnalysis.h" />
1042
    <ClInclude Include="..\dfg\DFGAnalysis.h" />
1041
    <ClInclude Include="..\dfg\DFGArgumentPosition.h" />
1043
    <ClInclude Include="..\dfg\DFGArgumentPosition.h" />
1042
    <ClInclude Include="..\dfg\DFGArgumentsEliminationPhase.h" />
1044
    <ClInclude Include="..\dfg\DFGArgumentsEliminationPhase.h" />
- a/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters +6 lines
Lines 1680-1685 a/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters_sec1
1680
    <ClCompile Include="..\dfg\DFGStructureAbstractValue.cpp">
1680
    <ClCompile Include="..\dfg\DFGStructureAbstractValue.cpp">
1681
      <Filter>dfg</Filter>
1681
      <Filter>dfg</Filter>
1682
    </ClCompile>
1682
    </ClCompile>
1683
    <ClCompile Include="..\dfg\DFGAllocationCycleSinkingPhase.cpp">
1684
      <Filter>dfg</Filter>
1685
    </ClCompile>
1683
    <ClCompile Include="..\dfg\DFGStructureRegistrationPhase.cpp">
1686
    <ClCompile Include="..\dfg\DFGStructureRegistrationPhase.cpp">
1684
      <Filter>dfg</Filter>
1687
      <Filter>dfg</Filter>
1685
    </ClCompile>
1688
    </ClCompile>
Lines 3391-3396 a/Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters_sec2
3391
    <ClInclude Include="..\dfg\DFGAllocator.h">
3394
    <ClInclude Include="..\dfg\DFGAllocator.h">
3392
      <Filter>dfg</Filter>
3395
      <Filter>dfg</Filter>
3393
    </ClInclude>
3396
    </ClInclude>
3397
    <ClInclude Include="..\dfg\DFGAllocationCycleSinkingPhase.h">
3398
      <Filter>dfg</Filter>
3399
    </ClInclude>
3394
    <ClInclude Include="..\dfg\DFGAnalysis.h">
3400
    <ClInclude Include="..\dfg\DFGAnalysis.h">
3395
      <Filter>dfg</Filter>
3401
      <Filter>dfg</Filter>
3396
    </ClInclude>
3402
    </ClInclude>
- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj +8 lines
Lines 962-967 a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj_sec1
962
		5DBB151B131D0B310056AD36 /* testapi.js in Copy Support Script */ = {isa = PBXBuildFile; fileRef = 14D857740A4696C80032146C /* testapi.js */; };
962
		5DBB151B131D0B310056AD36 /* testapi.js in Copy Support Script */ = {isa = PBXBuildFile; fileRef = 14D857740A4696C80032146C /* testapi.js */; };
963
		5DBB1525131D0BD70056AD36 /* minidom.js in Copy Support Script */ = {isa = PBXBuildFile; fileRef = 1412110D0A48788700480255 /* minidom.js */; };
963
		5DBB1525131D0BD70056AD36 /* minidom.js in Copy Support Script */ = {isa = PBXBuildFile; fileRef = 1412110D0A48788700480255 /* minidom.js */; };
964
		5DE6E5B30E1728EC00180407 /* create_hash_table in Headers */ = {isa = PBXBuildFile; fileRef = F692A8540255597D01FF60F7 /* create_hash_table */; settings = {ATTRIBUTES = (); }; };
964
		5DE6E5B30E1728EC00180407 /* create_hash_table in Headers */ = {isa = PBXBuildFile; fileRef = F692A8540255597D01FF60F7 /* create_hash_table */; settings = {ATTRIBUTES = (); }; };
965
		6277CC221B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 6277CC201B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.cpp */; };
966
		6277CC231B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 6277CC211B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.h */; };
965
		62D2D38F1ADF103F000206C1 /* FunctionRareData.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */; };
967
		62D2D38F1ADF103F000206C1 /* FunctionRareData.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */; };
966
		62D2D3901ADF103F000206C1 /* FunctionRareData.h in Headers */ = {isa = PBXBuildFile; fileRef = 62D2D38E1ADF103F000206C1 /* FunctionRareData.h */; settings = {ATTRIBUTES = (Private, ); }; };
968
		62D2D3901ADF103F000206C1 /* FunctionRareData.h in Headers */ = {isa = PBXBuildFile; fileRef = 62D2D38E1ADF103F000206C1 /* FunctionRareData.h */; settings = {ATTRIBUTES = (Private, ); }; };
967
		62F2AA371B0BEDE300610C7A /* DFGLazyNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62A9A29E1B0BED4800BD54CA /* DFGLazyNode.cpp */; };
969
		62F2AA371B0BEDE300610C7A /* DFGLazyNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 62A9A29E1B0BED4800BD54CA /* DFGLazyNode.cpp */; };
Lines 2679-2684 a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj_sec2
2679
		5DAFD6CB146B686300FBEFB4 /* JSC.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = JSC.xcconfig; sourceTree = "<group>"; };
2681
		5DAFD6CB146B686300FBEFB4 /* JSC.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = JSC.xcconfig; sourceTree = "<group>"; };
2680
		5DDDF44614FEE72200B4FB4D /* LLIntDesiredOffsets.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntDesiredOffsets.h; path = LLIntOffsets/LLIntDesiredOffsets.h; sourceTree = BUILT_PRODUCTS_DIR; };
2682
		5DDDF44614FEE72200B4FB4D /* LLIntDesiredOffsets.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntDesiredOffsets.h; path = LLIntOffsets/LLIntDesiredOffsets.h; sourceTree = BUILT_PRODUCTS_DIR; };
2681
		5DE3D0F40DD8DDFB00468714 /* WebKitAvailability.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WebKitAvailability.h; sourceTree = "<group>"; };
2683
		5DE3D0F40DD8DDFB00468714 /* WebKitAvailability.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WebKitAvailability.h; sourceTree = "<group>"; };
2684
		6277CC201B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGAllocationCycleSinkingPhase.cpp; path = dfg/DFGAllocationCycleSinkingPhase.cpp; sourceTree = "<group>"; };
2685
		6277CC211B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGAllocationCycleSinkingPhase.h; path = dfg/DFGAllocationCycleSinkingPhase.h; sourceTree = "<group>"; };
2682
		62A9A29E1B0BED4800BD54CA /* DFGLazyNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGLazyNode.cpp; path = dfg/DFGLazyNode.cpp; sourceTree = "<group>"; };
2686
		62A9A29E1B0BED4800BD54CA /* DFGLazyNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGLazyNode.cpp; path = dfg/DFGLazyNode.cpp; sourceTree = "<group>"; };
2683
		62A9A29F1B0BED4800BD54CA /* DFGLazyNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGLazyNode.h; path = dfg/DFGLazyNode.h; sourceTree = "<group>"; };
2687
		62A9A29F1B0BED4800BD54CA /* DFGLazyNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGLazyNode.h; path = dfg/DFGLazyNode.h; sourceTree = "<group>"; };
2684
		62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FunctionRareData.cpp; sourceTree = "<group>"; };
2688
		62D2D38D1ADF103F000206C1 /* FunctionRareData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FunctionRareData.cpp; sourceTree = "<group>"; };
Lines 4852-4857 a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj_sec3
4852
				0F55C19317276E4600CEABFD /* DFGAbstractValue.cpp */,
4856
				0F55C19317276E4600CEABFD /* DFGAbstractValue.cpp */,
4853
				0F62016F143FCD2F0068B77C /* DFGAbstractValue.h */,
4857
				0F62016F143FCD2F0068B77C /* DFGAbstractValue.h */,
4854
				0F66E16814DF3F1300B7B2E4 /* DFGAdjacencyList.h */,
4858
				0F66E16814DF3F1300B7B2E4 /* DFGAdjacencyList.h */,
4859
				6277CC201B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.cpp */,
4860
				6277CC211B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.h */,
4855
				0FB4B51916B62772003F696B /* DFGAllocator.h */,
4861
				0FB4B51916B62772003F696B /* DFGAllocator.h */,
4856
				A73781091799EA2E00817533 /* DFGAnalysis.h */,
4862
				A73781091799EA2E00817533 /* DFGAnalysis.h */,
4857
				0F1E3A431534CBAD000F9456 /* DFGArgumentPosition.h */,
4863
				0F1E3A431534CBAD000F9456 /* DFGArgumentPosition.h */,
Lines 6126-6131 a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj_sec4
6126
				0F2B66E917B6B5AB00A7AE3F /* JSArrayBufferView.h in Headers */,
6132
				0F2B66E917B6B5AB00A7AE3F /* JSArrayBufferView.h in Headers */,
6127
				0F2B66EA17B6B5AB00A7AE3F /* JSArrayBufferViewInlines.h in Headers */,
6133
				0F2B66EA17B6B5AB00A7AE3F /* JSArrayBufferViewInlines.h in Headers */,
6128
				A7BDAECB17F4EA1400F6140C /* JSArrayIterator.h in Headers */,
6134
				A7BDAECB17F4EA1400F6140C /* JSArrayIterator.h in Headers */,
6135
				6277CC231B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.h in Headers */,
6129
				BC18C4180E16F5CD00B34460 /* JSBase.h in Headers */,
6136
				BC18C4180E16F5CD00B34460 /* JSBase.h in Headers */,
6130
				0F2D4DE919832DAC007D4B19 /* ToThisStatus.h in Headers */,
6137
				0F2D4DE919832DAC007D4B19 /* ToThisStatus.h in Headers */,
6131
				140D17D70E8AD4A9000CD17D /* JSBasePrivate.h in Headers */,
6138
				140D17D70E8AD4A9000CD17D /* JSBasePrivate.h in Headers */,
Lines 7101-7106 a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj_sec5
7101
				0FC712DE17CD8779008CC93C /* DeferredCompilationCallback.cpp in Sources */,
7108
				0FC712DE17CD8779008CC93C /* DeferredCompilationCallback.cpp in Sources */,
7102
				A77A423D17A0BBFD00A8DB81 /* DFGAbstractHeap.cpp in Sources */,
7109
				A77A423D17A0BBFD00A8DB81 /* DFGAbstractHeap.cpp in Sources */,
7103
				0F55C19417276E4600CEABFD /* DFGAbstractValue.cpp in Sources */,
7110
				0F55C19417276E4600CEABFD /* DFGAbstractValue.cpp in Sources */,
7111
				6277CC221B3DE25800D16D2E /* DFGAllocationCycleSinkingPhase.cpp in Sources */,
7104
				0F485321187750560083B687 /* DFGArithMode.cpp in Sources */,
7112
				0F485321187750560083B687 /* DFGArithMode.cpp in Sources */,
7105
				0F2D4DDD19832D34007D4B19 /* DebuggerScope.cpp in Sources */,
7113
				0F2D4DDD19832D34007D4B19 /* DebuggerScope.cpp in Sources */,
7106
				0F63948415E48118006A597C /* DFGArrayMode.cpp in Sources */,
7114
				0F63948415E48118006A597C /* DFGArrayMode.cpp in Sources */,
- a/Source/JavaScriptCore/dfg/DFGAllocationCycleSinkingPhase.cpp +2128 lines
Line 0 a/Source/JavaScriptCore/dfg/DFGAllocationCycleSinkingPhase.cpp_sec1
1
/*
2
 * Copyright (C) 2015 Apple Inc. All rights reserved.
3
 *
4
 * Redistribution and use in source and binary forms, with or without
5
 * modification, are permitted provided that the following conditions
6
 * are met:
7
 * 1. Redistributions of source code must retain the above copyright
8
 *    notice, this list of conditions and the following disclaimer.
9
 * 2. Redistributions in binary form must reproduce the above copyright
10
 *    notice, this list of conditions and the following disclaimer in the
11
 *    documentation and/or other materials provided with the distribution.
12
 *
13
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24
 */
25
26
#include "config.h"
27
#include "DFGAllocationCycleSinkingPhase.h"
28
29
#if ENABLE(DFG_JIT)
30
31
#include "DFGBlockMapInlines.h"
32
#include "DFGCombinedLiveness.h"
33
#include "DFGGraph.h"
34
#include "DFGInsertionSet.h"
35
#include "DFGLazyNode.h"
36
#include "DFGLivenessAnalysisPhase.h"
37
#include "DFGOSRAvailabilityAnalysisPhase.h"
38
#include "DFGPhase.h"
39
#include "DFGPromotedHeapLocation.h"
40
#include "DFGSSACalculator.h"
41
#include "DFGValidate.h"
42
#include "JSCInlines.h"
43
44
#include <list>
45
46
namespace JSC { namespace DFG {
47
48
namespace {
49
50
bool verbose = false;
51
52
// In order to sink object cycles, we use a points-to analysis coupled
53
// with an escape analysis. This analysis is actually similar to an
54
// abstract interpreter focused on local allocations and ignoring
55
// everything else.
56
//
57
// We represent the local heap using two mappings:
58
//
59
// - A set of the local allocations present in the function, where
60
//   each of those have a further mapping from
61
//   PromotedLocationDescriptor to local allocations they must point
62
//   to.
63
//
64
// - A "pointer" mapping from nodes to local allocations, if they must
65
//   be equal to said local allocation and are currently live. This
66
//   can be because the node is the actual node that created the
67
//   allocation, or any other node that must currently point to it -
68
//   we don't make a difference.
69
//
70
// The following graph is a motivation for why we separate allocations
71
// from pointers:
72
//
73
// Block #0
74
//  0: NewObject({})
75
//  1: NewObject({})
76
//  -: PutByOffset(@0, @1, x)
77
//  -: PutStructure(@0, {x:0})
78
//  2: GetByOffset(@0, x)
79
//  -: Jump(#1)
80
//
81
// Block #1
82
//  -: Return(@2)
83
//
84
// Here, we need to remember in block #1 that @2 points to a local
85
// allocation with appropriate fields and structures information
86
// (because we should be able to place a materialization on top of
87
// block #1 here), even though @1 is dead. We *could* just keep @1
88
// artificially alive here, but there is no real reason to do it:
89
// after all, by the end of block #0, @1 and @2 should be completely
90
// interchangeable, and there is no reason for us to artificially make
91
// @1 more important.
92
//
93
// An important point to consider to understand this separation is
94
// that we should think of the local heap as follow: we have a
95
// bunch of nodes that are pointers to "allocations" that live
96
// someplace on the heap, and those allocations can have pointers in
97
// between themselves as well. We shouldn't care about whatever
98
// names we give to the allocations ; what matters when
99
// comparing/merging two heaps is the isomorphism/comparison between
100
// the allocation graphs as seen by the nodes.
101
//
102
// For instance, in the following graph:
103
//
104
// Block #0
105
//  0: NewObject({})
106
//  -: Branch(#1, #2)
107
//
108
// Block #1
109
//  1: NewObject({})
110
//  -: PutByOffset(@0, @1, x)
111
//  -: PutStructure(@0, {x:0})
112
//  -: Jump(#3)
113
//
114
// Block #2
115
//  2: NewObject({})
116
//  -: PutByOffset(@2, undefined, x)
117
//  -: PutStructure(@2, {x:0})
118
//  -: PutByOffset(@0, @2, x)
119
//  -: PutStructure(@0, {x:0})
120
//  -: Jump(#3)
121
//
122
// Block #3
123
//  -: Return(@0)
124
//
125
// we should think of the heaps at tail of blocks #1 and #2 as being
126
// exactly the same, even though one has @0.x pointing to @1 and the
127
// other has @0.x pointing to @2, because in essence this should not
128
// be different from the graph where we hoisted @1 and @2 into a
129
// single allocation in block #0. We currently will not handle this
130
// case, because we merge allocations based on the node they are
131
// coming from, but this is only a technicality for the sake of
132
// simplicity that shouldn't hide the deeper idea outlined here.
133
134
class Allocation {
135
public:
136
    // We use Escaped as a special allocation kind because when we
137
    // decide to sink an allocation, we still need to keep track of it
138
    // once it is escaped if it still has pointers to it in order to
139
    // replace any use of those pointers by the corresponding
140
    // materialization
141
    enum class Kind { Escaped, Object, Activation, Function };
142
143
    explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
144
        : m_identifier(identifier)
145
        , m_kind(kind)
146
    {
147
    }
148
149
150
    const HashMap<PromotedLocationDescriptor, Node*>& fields() const
151
    {
152
        return m_fields;
153
    }
154
155
    Node* get(PromotedLocationDescriptor descriptor)
156
    {
157
        return m_fields.get(descriptor);
158
    }
159
160
    Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
161
    {
162
        // Pointing to anything else than an unescaped local
163
        // allocation is represented by simply not having the
164
        // field
165
        if (value)
166
            m_fields.set(descriptor, value);
167
        else
168
            m_fields.remove(descriptor);
169
        return *this;
170
    }
171
172
    void remove(PromotedLocationDescriptor descriptor)
173
    {
174
        set(descriptor, nullptr);
175
    }
176
177
    bool hasStructures() const
178
    {
179
        switch (kind()) {
180
        case Kind::Object:
181
            return true;
182
183
        default:
184
            return false;
185
        }
186
    }
187
188
    Allocation& setStructures(const StructureSet& structures)
189
    {
190
        ASSERT(hasStructures() && !structures.isEmpty());
191
        m_structures = structures;
192
        return *this;
193
    }
194
195
    Allocation& mergeStructures(const StructureSet& structures)
196
    {
197
        ASSERT(hasStructures() || structures.isEmpty());
198
        m_structures.merge(structures);
199
        return *this;
200
    }
201
202
    Allocation& filterStructures(const StructureSet& structures)
203
    {
204
        ASSERT(hasStructures());
205
        m_structures.filter(structures);
206
        return *this;
207
    }
208
209
    const StructureSet& structures() const
210
    {
211
        return m_structures;
212
    }
213
214
    Node* identifier() const { return m_identifier; }
215
216
    Kind kind() const { return m_kind; }
217
218
    bool isEscapedAllocation() const
219
    {
220
        return kind() == Kind::Escaped;
221
    }
222
223
    bool isObjectAllocation() const
224
    {
225
        return m_kind == Kind::Object;
226
    }
227
228
    bool isActivationAllocation() const
229
    {
230
        return m_kind == Kind::Activation;
231
    }
232
233
    bool isFunctionAllocation() const
234
    {
235
        return m_kind == Kind::Function;
236
    }
237
238
    bool operator==(const Allocation& other) const
239
    {
240
        return m_identifier == other.m_identifier
241
            && m_kind == other.m_kind
242
            && m_fields == other.m_fields
243
            && m_structures == other.m_structures;
244
    }
245
246
    bool operator!=(const Allocation& other) const
247
    {
248
        return !(*this == other);
249
    }
250
251
    void dump(PrintStream& out) const
252
    {
253
        dumpInContext(out, nullptr);
254
    }
255
256
    void dumpInContext(PrintStream& out, DumpContext* context) const
257
    {
258
        switch (m_kind) {
259
        case Kind::Escaped:
260
            out.print("Escaped");
261
            break;
262
263
        case Kind::Object:
264
            out.print("Object");
265
            break;
266
267
        case Kind::Function:
268
            out.print("Function");
269
            break;
270
271
        case Kind::Activation:
272
            out.print("Activation");
273
            break;
274
        }
275
        out.print("Allocation(");
276
        if (!m_structures.isEmpty())
277
            out.print(inContext(m_structures, context));
278
        if (!m_fields.isEmpty()) {
279
            if (!m_structures.isEmpty())
280
                out.print(", ");
281
            out.print(mapDump(m_fields, " => #", ", "));
282
        }
283
        out.print(")");
284
    }
285
286
private:
287
    Node* m_identifier; // This is the actual node that created the allocation
288
    Kind m_kind;
289
    HashMap<PromotedLocationDescriptor, Node*> m_fields;
290
    StructureSet m_structures;
291
};
292
293
class LocalHeap {
294
public:
295
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
296
    {
297
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
298
        m_pointers.add(node, node);
299
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
300
    }
301
302
    bool isAllocation(Node* identifier) const
303
    {
304
        return m_allocations.contains(identifier);
305
    }
306
307
    // Note that this is fundamentally different from
308
    // onlyLocalAllocation() below. getAllocation() takes as argument
309
    // a node-as-identifier, that is, an allocation node. This
310
    // allocation node doesn't have to be alive; it may only be
311
    // pointed to by other nodes or allocation fields.
312
    // For instance, in the following graph:
313
    //
314
    // Block #0
315
    //  0: NewObject({})
316
    //  1: NewObject({})
317
    //  -: PutByOffset(@0, @1, x)
318
    //  -: PutStructure(@0, {x:0})
319
    //  2: GetByOffset(@0, x)
320
    //  -: Jump(#1)
321
    //
322
    // Block #1
323
    //  -: Return(@2)
324
    //
325
    // At head of block #1, the only reachable allocation is #@1,
326
    // which can be reached through node @2. Thus, getAllocation(#@1)
327
    // contains the appropriate metadata for this allocation, but
328
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
329
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
330
    // is the same as getAllocation(#@1), while getAllocation(#@2)
331
    // does not make sense since @2 is not an allocation node.
332
    //
333
    // This is meant to be used when the node is already known to be
334
    // an identifier (i.e. an allocation) - probably because it was
335
    // found as value of a field or pointer in the current heap, or
336
    // was the result of a call to follow(). In any other cases (such
337
    // as when doing anything while traversing the graph), the
338
    // appropriate function to call is probably onlyLocalAllocation.
339
    Allocation& getAllocation(Node* identifier)
340
    {
341
        auto iter = m_allocations.find(identifier);
342
        ASSERT(iter != m_allocations.end());
343
        return iter->value;
344
    }
345
346
    void newPointer(Node* node, Node* identifier)
347
    {
348
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
349
        ASSERT(isAllocation(identifier));
350
        m_pointers.add(node, identifier);
351
    }
352
353
    // follow solves the points-to problem. Given a live node, which
354
    // may be either an allocation itself or a heap read (e.g. a
355
    // GetByOffset node), it returns the corresponding allocation
356
    // node, if there is one. If the argument node is neither an
357
    // allocation or a heap read, or may point to different nodes,
358
    // nullptr will be returned. Note that a node that points to
359
    // different nodes can never point to an unescaped local
360
    // allocation.
361
    Node* follow(Node* node) const
362
    {
363
        auto iter = m_pointers.find(node);
364
        ASSERT(iter == m_pointers.end() || m_allocations.contains(iter->value));
365
        return iter == m_pointers.end() ? nullptr : iter->value;
366
    }
367
368
    Node* follow(PromotedHeapLocation location) const
369
    {
370
        const Allocation& base = m_allocations.find(location.base())->value;
371
        auto iter = base.fields().find(location.descriptor());
372
373
        if (iter == base.fields().end())
374
            return nullptr;
375
376
        return iter->value;
377
    }
378
379
    // onlyLocalAllocation find the corresponding allocation metadata
380
    // for any live node. onlyLocalAllocation(node) is essentially
381
    // getAllocation(follow(node)), with appropriate null handling.
382
    Allocation* onlyLocalAllocation(Node* node)
383
    {
384
        Node* identifier = follow(node);
385
        if (!identifier)
386
            return nullptr;
387
388
        return &getAllocation(identifier);
389
    }
390
391
    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
392
    {
393
        Node* identifier = follow(location);
394
        if (!identifier)
395
            return nullptr;
396
397
        return &getAllocation(identifier);
398
    }
399
400
    // This allows us to store the escapees only when necessary. If
401
    // set, the current escapees can be retrieved at any time using
402
    // takeEscapees(), which will clear the cached set of escapees;
403
    // otherwise the heap won't remember escaping allocations.
404
    void setWantEscapees()
405
    {
406
        m_wantEscapees = true;
407
    }
408
409
    HashMap<Node*, Allocation> takeEscapees()
410
    {
411
        return WTF::move(m_escapees);
412
    }
413
414
    void escape(Node* node)
415
    {
416
        Node* identifier = follow(node);
417
        if (!identifier)
418
            return;
419
420
        escapeAllocation(identifier);
421
    }
422
423
    void merge(const LocalHeap& other)
424
    {
425
        assertIsValid();
426
        other.assertIsValid();
427
        ASSERT(!m_wantEscapees);
428
429
        if (!reached()) {
430
            ASSERT(other.reached());
431
            *this = other;
432
            return;
433
        }
434
435
        HashSet<Node*> toEscape;
436
437
        for (auto& allocationEntry : other.m_allocations)
438
            m_allocations.add(allocationEntry.key, allocationEntry.value);
439
        for (auto& allocationEntry : m_allocations) {
440
            auto allocationIter = other.m_allocations.find(allocationEntry.key);
441
442
            // If we have it and they don't, it died for them but we
443
            // are keeping it alive from another field somewhere.
444
            // There is nothing to do - we will be escaped
445
            // automatically when we handle that other field.
446
            // This will also happen for allocation that we have and
447
            // they don't, and all of those will get pruned.
448
            if (allocationIter == other.m_allocations.end())
449
                continue;
450
451
            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
452
                toEscape.add(allocationEntry.key);
453
                for (const auto& fieldEntry : allocationIter->value.fields())
454
                    toEscape.add(fieldEntry.value);
455
            } else {
456
                mergePointerSets(
457
                    allocationEntry.value.fields(), allocationIter->value.fields(),
458
                    [&] (Node* identifier) {
459
                        toEscape.add(identifier);
460
                    },
461
                    [&] (PromotedLocationDescriptor field) {
462
                        allocationEntry.value.remove(field);
463
                    });
464
                allocationEntry.value.mergeStructures(allocationIter->value.structures());
465
            }
466
        }
467
468
        mergePointerSets(m_pointers, other.m_pointers,
469
            [&] (Node* identifier) {
470
                toEscape.add(identifier);
471
            },
472
            [&] (Node* field) {
473
                m_pointers.remove(field);
474
            });
475
476
        for (Node* identifier : toEscape)
477
            escapeAllocation(identifier);
478
479
        if (!ASSERT_DISABLED) {
480
            for (const auto& entry : m_allocations)
481
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
482
        }
483
484
        // If there is no remaining pointer to an allocation, we can
485
        // remove it. This should only happen for escaped allocations,
486
        // because we only merge liveness-pruned heaps in the first
487
        // place.
488
        prune();
489
490
        assertIsValid();
491
    }
492
493
    void pruneByLiveness(const HashSet<Node*>& live)
494
    {
495
        Vector<Node*> toRemove;
496
        for (const auto& entry : m_pointers) {
497
            if (!live.contains(entry.key))
498
                toRemove.append(entry.key);
499
        }
500
        for (Node* node : toRemove)
501
            m_pointers.remove(node);
502
        
503
        prune();
504
    }
505
506
    void assertIsValid() const
507
    {
508
        if (ASSERT_DISABLED)
509
            return;
510
511
        // Pointers should point to an actual allocation
512
        for (const auto& entry : m_pointers) {
513
            ASSERT_UNUSED(entry, entry.value);
514
            ASSERT(m_allocations.contains(entry.value));
515
        }
516
517
        for (const auto& allocationEntry : m_allocations) {
518
            // Fields should point to an actual allocation
519
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
520
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
521
                ASSERT(m_allocations.contains(fieldEntry.value));
522
            }
523
        }
524
    }
525
526
    bool operator==(const LocalHeap& other) const
527
    {
528
        assertIsValid();
529
        other.assertIsValid();
530
        return m_allocations == other.m_allocations
531
            && m_pointers == other.m_pointers;
532
    }
533
534
    bool operator!=(const LocalHeap& other) const
535
    {
536
        return !(*this == other);
537
    }
538
539
    const HashMap<Node*, Allocation> allocations() const
540
    {
541
        return m_allocations;
542
    }
543
544
    const HashMap<Node*, Node*> pointers() const
545
    {
546
        return m_pointers;
547
    }
548
549
    void dump(PrintStream& out) const
550
    {
551
        out.print("  Allocations:\n");
552
        for (const auto& entry : m_allocations)
553
            out.print("    #", entry.key, ": ", entry.value, "\n");
554
        out.print("  Pointers:\n");
555
        for (const auto& entry : m_pointers)
556
            out.print("    ", entry.key, " => #", entry.value, "\n");
557
    }
558
559
    bool reached() const
560
    {
561
        return m_reached;
562
    }
563
564
    void setReached()
565
    {
566
        m_reached = true;
567
    }
568
569
private:
570
    // When we merge two heaps, we escape all fields of allocations,
571
    // unless they point to the same thing in both heaps.
572
    // The reason for this is that it allows us not to do extra work
573
    // for diamond graphs where we would otherwise have to check
574
    // whether we have a single definition or not, which would be
575
    // cumbersome.
576
    //
577
    // Note that we should try to unify nodes even when they are not
578
    // from the same allocation; for instance we should be able to
579
    // completely eliminate all allocations from the following graph:
580
    //
581
    // Block #0
582
    //  0: NewObject({})
583
    //  -: Branch(#1, #2)
584
    //
585
    // Block #1
586
    //  1: NewObject({})
587
    //  -: PutByOffset(@1, "left", val)
588
    //  -: PutStructure(@1, {val:0})
589
    //  -: PutByOffset(@0, @1, x)
590
    //  -: PutStructure(@0, {x:0})
591
    //  -: Jump(#3)
592
    //
593
    // Block #2
594
    //  2: NewObject({})
595
    //  -: PutByOffset(@2, "right", val)
596
    //  -: PutStructure(@2, {val:0})
597
    //  -: PutByOffset(@0, @2, x)
598
    //  -: PutStructure(@0, {x:0})
599
    //  -: Jump(#3)
600
    //
601
    // Block #3:
602
    //  3: GetByOffset(@0, x)
603
    //  4: GetByOffset(@3, val)
604
    //  -: Return(@4)
605
    template<typename Key, typename EscapeFunctor, typename RemoveFunctor>
606
    void mergePointerSets(
607
        const HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their,
608
        const EscapeFunctor& escape, const RemoveFunctor& remove)
609
    {
610
        Vector<Key> toRemove;
611
        for (const auto& entry : my) {
612
            auto iter = their.find(entry.key);
613
            if (iter == their.end()) {
614
                toRemove.append(entry.key);
615
                escape(entry.value);
616
            } else if (iter->value != entry.value) {
617
                toRemove.append(entry.key);
618
                escape(entry.value);
619
                escape(iter->value);
620
            }
621
        }
622
        for (const auto& entry : their) {
623
            if (my.contains(entry.key))
624
                continue;
625
            escape(entry.value);
626
        }
627
        for (Key key : toRemove)
628
            remove(key);
629
    }
630
631
    void escapeAllocation(Node* identifier)
632
    {
633
        Allocation& allocation = getAllocation(identifier);
634
        if (allocation.isEscapedAllocation())
635
            return;
636
637
        Allocation unescaped = WTF::move(allocation);
638
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);
639
640
        for (const auto& entry : unescaped.fields())
641
            escapeAllocation(entry.value);
642
643
        if (m_wantEscapees)
644
            m_escapees.add(unescaped.identifier(), WTF::move(unescaped));
645
    }
646
647
    void prune()
648
    {
649
        HashSet<Node*> reachable;
650
        for (const auto& entry : m_pointers)
651
            reachable.add(entry.value);
652
653
        // Repeatedly mark as reachable allocations in fields of other
654
        // reachable allocations
655
        {
656
            Vector<Node*> worklist;
657
            worklist.appendRange(reachable.begin(), reachable.end());
658
659
            while (!worklist.isEmpty()) {
660
                Node* identifier = worklist.takeLast();
661
                Allocation& allocation = m_allocations.find(identifier)->value;
662
                for (const auto& entry : allocation.fields()) {
663
                    if (reachable.add(entry.value).isNewEntry)
664
                        worklist.append(entry.value);
665
                }
666
            }
667
        }
668
669
        // Remove unreachable allocations
670
        {
671
            Vector<Node*> toRemove;
672
            for (const auto& entry : m_allocations) {
673
                if (!reachable.contains(entry.key))
674
                    toRemove.append(entry.key);
675
            }
676
            for (Node* identifier : toRemove)
677
                m_allocations.remove(identifier);
678
        }
679
    }
680
681
    bool m_reached = false;
682
    HashMap<Node*, Node*> m_pointers;
683
    HashMap<Node*, Allocation> m_allocations;
684
685
    bool m_wantEscapees = false;
686
    HashMap<Node*, Allocation> m_escapees;
687
};
688
689
class AllocationCycleSinkingPhase : public Phase {
690
public:
691
    // NOTE(review): the phase name reads "object allocation elimination" —
    // presumably kept identical to the old sinking phase's name so dumps and
    // tooling keyed on it keep working; confirm before renaming.
    AllocationCycleSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }
698
699
    bool run()
700
    {
701
        ASSERT(m_graph.m_form == SSA);
702
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
703
704
        if (!performSinking())
705
            return false;
706
707
        if (verbose) {
708
            dataLog("Graph after elimination:\n");
709
            m_graph.dump();
710
        }
711
712
        return true;
713
    }
714
715
private:
716
    // Drives the whole phase: computes the prerequisite analyses, runs the
    // escape analysis to a fixpoint, picks sink candidates, and rewrites the
    // graph. Returns false (graph untouched) when nothing can be sunk.
    bool performSinking()
    {
        // These analyses must run before handleNode()/promoteLocalHeap()
        // consult ref counts, node owners, liveness, and OSR availability.
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        // Snapshot the pre-sinking graph so a validation failure later can
        // show what we started from.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();

        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }
754
755
    // Runs the escape analysis to a fixpoint, filling m_heapAtHead and
    // m_heapAtTail for every block. handleNode() is called with no-op
    // write/resolve functors: this pass only computes heap states, it does
    // not rewrite anything.
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                // Work on a local copy; m_heapAtHead[block] must stay the
                // merged head state for the next iteration.
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                // Fixpoint check: if the tail state didn't change, the
                // successors' head states can't change either.
                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentaly leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors())
                    m_heapAtHead[successorBlock].merge(m_heap);
            }
        } while (changed);
    }
806
807
    // The single transfer function for one node, shared by the analysis pass
    // and the transformation pass. It updates m_heap to reflect |node|'s
    // effect, and reports heap traffic through the two functors:
    //   - heapWrite(location, value) is invoked for each promoted-heap write
    //     the node performs;
    //   - heapResolve(location) may return the node currently materializing
    //     that location (or nullptr); during the pure analysis pass it
    //     always returns nullptr, so the rewriting branches are skipped.
    // Any node/edge this switch does not understand conservatively escapes
    // its operands (the default case).
    template<typename WriteFunctor, typename ResolveFunctor>
    void handleNode(
        Node* node,
        const WriteFunctor& heapWrite,
        const ResolveFunctor& heapResolve)
    {
        m_heap.assertIsValid();
        ASSERT(m_heap.takeEscapees().isEmpty());

        // |target| is the allocation the node reads from or writes to;
        // |writes| collects field writes; |exactRead| is set when the node
        // is a read of a single known promoted location.
        Allocation* target = nullptr;
        HashMap<PromotedLocationDescriptor, LazyNode> writes;
        PromotedLocationDescriptor exactRead;

        switch (node->op()) {
        case NewObject:
            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
            target->setStructures(node->structure());
            writes.add(
                StructurePLoc, LazyNode(m_graph.freeze(node->structure())));
            break;

        case MaterializeNewObject: {
            // A materialization from a previous run of this phase: child 0
            // is the structure, children 1.. are the named properties.
            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
            target->setStructures(node->structureSet());
            writes.add(
                StructurePLoc, LazyNode(m_graph.varArgChild(node, 0).node()));
            for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                writes.add(
                    PromotedLocationDescriptor(
                        NamedPropertyPLoc,
                        node->objectMaterializationData().m_properties[i].m_identifierNumber),
                    LazyNode(m_graph.varArgChild(node, i + 1).node()));
            }
            break;
        }

        case NewFunction: {
            // A still-valid singleton watchpoint means this function object
            // has identity observable elsewhere; don't track it.
            if (node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid()) {
                m_heap.escape(node->child1().node());
                break;
            }
            target = &m_heap.newAllocation(node, Allocation::Kind::Function);
            writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
            writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
            break;
        }

        case CreateActivation: {
            if (node->castOperand<SymbolTable*>()->singletonScope()->isStillValid()) {
                m_heap.escape(node->child1().node());
                break;
            }
            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
            writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
            writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
            {
                // Every closure variable starts out undefined.
                SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
                ConcurrentJITLocker locker(symbolTable->m_lock);
                LazyNode undefined(m_graph.freeze(jsUndefined()));
                for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
                    writes.add(
                        PromotedLocationDescriptor(ClosureVarPLoc, iter->value.scopeOffset().offset()),
                        undefined);
                }
            }
            break;
        }

        case MaterializeCreateActivation: {
            // We have sunk this once already - there is no way the
            // watchpoint is still valid.
            ASSERT(!node->castOperand<SymbolTable*>()->singletonScope()->isStillValid());
            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
            writes.add(ActivationSymbolTablePLoc, LazyNode(m_graph.varArgChild(node, 0).node()));
            writes.add(ActivationScopePLoc, LazyNode(m_graph.varArgChild(node, 1).node()));
            for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                writes.add(
                    PromotedLocationDescriptor(
                        ClosureVarPLoc,
                        node->objectMaterializationData().m_properties[i].m_identifierNumber),
                    LazyNode(m_graph.varArgChild(node, i + 2).node()));
            }
            break;
        }

        case PutStructure:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isObjectAllocation()) {
                writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next))));
                target->setStructures(node->transition()->next);
            } else
                m_heap.escape(node->child1().node());
            break;

        case CheckStructure: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                allocation->filterStructures(node->structureSet());
                // If the structure is locally known, check it as a constant
                // instead of loading it from the (sunk) object.
                if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
                    node->convertToCheckStructureImmediate(value);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case GetByOffset:
        case GetGetterSetterByOffset:
            // child2 is the object; child1 is the butterfly/storage edge.
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case MultiGetByOffset:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->multiGetByOffsetData().identifierNumber;
                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
            } else
                m_heap.escape(node->child1().node());
            break;

        case PutByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                writes.add(
                    PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
                    LazyNode(node->child3().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
                m_heap.escape(node->child3().node());
            }
            break;

        case GetClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                exactRead =
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
            } else
                m_heap.escape(node->child1().node());
            break;

        case PutClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                writes.add(
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
                    LazyNode(node->child2().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case SkipScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation())
                exactRead = ActivationScopePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetExecutable:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionExecutablePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionActivationPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case Check:
            // A Check only escapes operands whose use kind could actually
            // trip on one of our allocations; checks subsumed by SpecObject
            // (or checkless edges) are harmless.
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    if (edge.willNotHaveCheck())
                        return;

                    if (alreadyChecked(edge.useKind(), SpecObject))
                        return;

                    m_heap.escape(edge.node());
                });
            break;

        case MovHint:
        case PutHint:
            // Handled by OSR availability analysis
            break;

        default:
            // Conservative: any unmodeled node escapes all of its children.
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    m_heap.escape(edge.node());
                });
            break;
        }

        if (exactRead) {
            ASSERT(target);
            ASSERT(writes.isEmpty());
            // If the location is materialized somewhere, the read can be
            // replaced outright with that node.
            if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
                ASSERT(!value->replacement());
                node->replaceWith(value);
            }
            // If the field holds a tracked allocation, the reading node
            // becomes a new pointer to it.
            Node* identifier = target->get(exactRead);
            if (identifier)
                m_heap.newPointer(node, identifier);
        }

        for (auto entry : writes) {
            ASSERT(target);
            // Store the pointed-to allocation (if the written value points
            // to one); a non-node LazyNode (a constant) clears the field.
            if (entry.value.isNode())
                target->set(entry.key, m_heap.follow(entry.value.asNode()));
            else
                target->remove(entry.key);
            heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
        }

        m_heap.assertIsValid();
    }
1043
1044
    bool determineSinkCandidates()
1045
    {
1046
        m_sinkCandidates.clear();
1047
        m_materializationToEscapee.clear();
1048
        m_materializationSiteToMaterializations.clear();
1049
        m_materializationSiteToRecoveries.clear();
1050
1051
        // Logically we wish to consider every allocation and sink
1052
        // it. However, it is probably not profitable to sink an
1053
        // allocation that will always escape. So, we only sink an
1054
        // allocation if one of the following is true:
1055
        //
1056
        // 1) There exists a basic block with only backwards outgoing
1057
        //    edges (or no outgoing edges) in which the node wasn't
1058
        //    materialized. This is meant to catch
1059
        //    effectively-infinite loops in which we don't need to
1060
        //    have allocated the object.
1061
        //
1062
        // 2) There exists a basic block at the tail of which the node
1063
        //    is dead and not materialized.
1064
        //
1065
        // 3) The sum of execution counts of the materializations is
1066
        //    less than the sum of execution counts of the original
1067
        //    node.
1068
        //
1069
        // We currently implement only rule #2.
1070
        // FIXME: Implement the two other rules.
1071
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
1072
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
1073
        //
1074
        // However, these rules allow for a sunk object to be put into
1075
        // a non-sunk one, which we don't support. We could solve this
1076
        // by supporting PutHints on local allocations, making these
1077
        // objects only partially correct, and we would need to adapt
1078
        // the OSR availability analysis and OSR exit to handle
1079
        // this. This would be totally doable, but would create a
1080
        // super rare, and thus bug-prone, code path.
1081
        // So, instead, we need to implement one of the following
1082
        // closure rules:
1083
        //
1084
        // 1) If we put a sink candidate into a local allocation that
1085
        //    is not a sink candidate, change our minds and don't
1086
        //    actually sink the sink candidate.
1087
        //
1088
        // 2) If we put a sink candidate into a local allocation, that
1089
        //    allocation becomes a sink candidate as well.
1090
        //
1091
        // We currently choose to implement closure rule #2.
1092
        HashMap<Node*, Vector<Node*>> dependencies;
1093
        bool hasUnescapedReads = false;
1094
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
1095
            m_heap = m_heapAtHead[block];
1096
1097
            for (Node* node : *block) {
1098
                handleNode(
1099
                    node,
1100
                    [&] (PromotedHeapLocation location, LazyNode value) {
1101
                        if (!value.isNode())
1102
                            return;
1103
1104
                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
1105
                        if (allocation && !allocation->isEscapedAllocation())
1106
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
1107
                    },
1108
                    [&] (PromotedHeapLocation) -> Node* {
1109
                        hasUnescapedReads = true;
1110
                        return nullptr;
1111
                    });
1112
            }
1113
1114
            // The sink candidates are initially the unescaped
1115
            // allocations dying at tail of blocks
1116
            HashSet<Node*> allocations;
1117
            for (const auto& entry : m_heap.allocations()) {
1118
                if (!entry.value.isEscapedAllocation())
1119
                    allocations.add(entry.key);
1120
            }
1121
1122
            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
1123
1124
            for (Node* identifier : allocations) {
1125
                if (!m_heap.isAllocation(identifier))
1126
                    m_sinkCandidates.add(identifier);
1127
            }
1128
        }
1129
1130
        // Ensure that the set of sink candidates is closed for put operations
1131
        Vector<Node*> worklist;
1132
        worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());
1133
1134
        while (!worklist.isEmpty()) {
1135
            for (Node* identifier : dependencies.get(worklist.takeLast())) {
1136
                if (m_sinkCandidates.add(identifier).isNewEntry)
1137
                    worklist.append(identifier);
1138
            }
1139
        }
1140
1141
        if (m_sinkCandidates.isEmpty())
1142
            return hasUnescapedReads;
1143
1144
        if (verbose)
1145
            dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");
1146
1147
        // Create the materialization nodes
1148
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
1149
            m_heap = m_heapAtHead[block];
1150
            m_heap.setWantEscapees();
1151
1152
            for (Node* node : *block) {
1153
                handleNode(
1154
                    node,
1155
                    [] (PromotedHeapLocation, LazyNode) { },
1156
                    [] (PromotedHeapLocation) -> Node* {
1157
                        return nullptr;
1158
                    });
1159
                auto escapees = m_heap.takeEscapees();
1160
                if (!escapees.isEmpty())
1161
                    placeMaterializations(escapees, node);
1162
            }
1163
1164
            m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
1165
1166
            {
1167
                HashMap<Node*, Allocation> escapingOnEdge;
1168
                for (const auto& entry : m_heap.allocations()) {
1169
                    if (entry.value.isEscapedAllocation())
1170
                        continue;
1171
1172
                    bool mustEscape = false;
1173
                    for (BasicBlock* successorBlock : block->successors()) {
1174
                        if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
1175
                            || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
1176
                            mustEscape = true;
1177
                    }
1178
1179
                    if (mustEscape)
1180
                        escapingOnEdge.add(entry.key, entry.value);
1181
                }
1182
                placeMaterializations(WTF::move(escapingOnEdge), block->terminal());
1183
            }
1184
        }
1185
1186
        return hasUnescapedReads || !m_sinkCandidates.isEmpty();
1187
    }
1188
1189
    // Decides, for the given set of allocations escaping at node |where|, in
    // which order they must be rematerialized, and records the
    // materializations plus the Put recoveries/hints that will be inserted
    // later by promoteLocalHeap(). Escapees that are not sink candidates are
    // ignored here.
    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // We don't create materializations if the escapee is not a
        // sink candidate
        Vector<Node*> toRemove;
        for (const auto& entry : escapees) {
            if (!m_sinkCandidates.contains(entry.key))
                toRemove.append(entry.key);
        }
        for (Node* identifier : toRemove)
            escapees.remove(identifier);

        if (escapees.isEmpty())
            return;

        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates
        Vector<PromotedHeapLocation> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                if (escapees.contains(field.value) && !field.key.neededForMaterialization())
                    hints.append(PromotedHeapLocation(entry.key, field.key));
            }
        }

        // Now we need to order the materialization. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (vertex) feedback set problem. Unfortunately, this is a
        // NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure, that proceeds as
        // follows:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.

        // Compute dependencies between materializations.
        // - dependencies: fields of an escapee pointing at other escapees
        // - reverseDependencies: the transpose of the above
        // - forMaterialization: the subset of dependencies that must exist
        //   before the materialization node itself can be created
        HashMap<Node*, HashSet<Node*>> dependencies;
        HashMap<Node*, HashSet<Node*>> reverseDependencies;
        HashMap<Node*, HashSet<Node*>> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, HashSet<Node*>()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, HashSet<Node*>()).iterator->value;
            reverseDependencies.add(entry.key, HashSet<Node*>());
            for (const auto& field : entry.value.fields()) {
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.add(field.value);
                    reverseDependencies.add(field.value, HashSet<Node*>()).iterator->value.add(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.add(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies
        HashSet<Node*> materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.add(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };

        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes
        std::list<Allocation> toMaterialize;
        auto firstPos = toMaterialize.begin();
        auto materializeFirst = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            // We need to insert *after* the current position
            if (firstPos != toMaterialize.end())
                ++firstPos;
            firstPos = toMaterialize.insert(firstPos, WTF::move(allocation));
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes
        auto lastPos = toMaterialize.end();
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            lastPos = toMaterialize.insert(lastPos, WTF::move(allocation));
        };

        Vector<PromotedHeapLocation> toRecover;

        while (!escapees.isEmpty()) {
            materialized.clear();

            // Materialize nodes that won't require recoveries if we can
            for (auto& entry : escapees) {
                if (!forMaterialization.find(entry.key)->value.isEmpty())
                    continue;

                if (dependencies.find(entry.key)->value.isEmpty()) {
                    materializeFirst(WTF::move(entry.value));
                    continue;
                }

                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
                    materializeLast(WTF::move(entry.value));
                    continue;
                }
            }

            // We have an actual cycle
            if (materialized.isEmpty()) {
                uint64_t maxEvaluation = 0;
                // NOTE(review): bestAllocation is only assigned when some
                // evaluation is > 0; the RELEASE_ASSERT(maxEvaluation > 0)
                // below guarantees an assignment happened before the use.
                Allocation* bestAllocation;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTF::move(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        materialized.clear();

        // Walk the chosen materialization order; any field pointing at an
        // escapee that is materialized *later* in the order is a cycle edge
        // that must be closed with a recovery Put afterwards.
        HashSet<Node*> escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.add(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.add(allocation.identifier());
        }

        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendRange(
                    toRecover.begin(), toRecover.end());
        }

        // The hints need to be after the "real" recoveries so that we
        // don't hint not-yet-complete objects
        if (!hints.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendRange(
                    hints.begin(), hints.end());
        }
    }
1390
1391
    // Builds the DFG node that rematerializes the given local allocation at
    // its escape point. The returned node is not yet inserted into any basic
    // block; placeMaterializations()/promoteLocalHeap() record and insert it.
    //
    // The materialization reuses the original allocation node's prediction
    // and semantic origin, but takes its exit origin from |where| so that OSR
    // exit state is correct at the escape site.
    Node* createMaterialization(const Allocation& allocation, Node* where)
    {
        // FIXME: This is the only place where we actually use the
        // fact that an allocation's identifier is indeed the node
        // that created the allocation.
        switch (allocation.kind()) {
        case Allocation::Kind::Object: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            StructureSet* set = m_graph.addStructureSet(allocation.structures());

            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
                NodeOrigin(
                    allocation.identifier()->origin.semantic,
                    where->origin.forExit),
                OpInfo(set), OpInfo(data), 0, 0);
        }

        case Allocation::Kind::Function: {
            FrozenValue* executable = allocation.identifier()->cellOperand();

            // Functions have no promoted fields to fill in, so a plain
            // NewFunction suffices as the materialization. (The unreachable
            // `break;` that followed this return has been removed.)
            return m_graph.addNode(
                allocation.identifier()->prediction(), NewFunction,
                NodeOrigin(
                    allocation.identifier()->origin.semantic,
                    where->origin.forExit),
                OpInfo(executable));
        }

        case Allocation::Kind::Activation: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            FrozenValue* symbolTable = allocation.identifier()->cellOperand();

            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
                NodeOrigin(
                    allocation.identifier()->origin.semantic,
                    where->origin.forExit),
                OpInfo(symbolTable), OpInfo(data), 0, 0);
        }

        default:
            DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
        }
    }
1437
1438
    // Rewrites the graph once the sink candidates and materialization sites
    // are known: collects the promoted heap locations, runs two SSA
    // calculators (one over promoted pointer fields, one over the
    // allocations/materializations themselves), inserts Phis/Upsilons and
    // PutHints, converts sunk allocation nodes to their Phantom forms, and
    // redirects every use to the resolved value or materialization.
    void promoteLocalHeap()
    {
        // Collect the set of heap locations that we will be operating
        // over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // If the location is not on a sink candidate,
                        // we only sink it if it is read
                        if (m_sinkCandidates.contains(location.base()))
                            locations.add(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        locations.add(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* index : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(index, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(index);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at top of block 0 so that
        // they are inserted only once and we don't clutter the graph
        // with useless constants everywhere
        HashMap<FrozenValue*, Node*> lazyMapping;
        if (!m_bottom) {
            // m_bottom is the placeholder value for named properties that
            // may only conditionally have been stored; 1927 is an arbitrary
            // recognizable sentinel.
            m_bottom = m_insertionSet.insertConstant(0, NodeOrigin(), jsNumber(1927));
        }
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally,
                // and that would necessitate bottoms
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                // A materialization is an SSA def of the escapee it stands for.
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            // Lazy (constant) values are instantiated at most
                            // once, at the top of block 0 (see lazyMapping).
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));

        // Run the SSA calculators to create Phis
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once we are escaped
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until we are escaped
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    // Keep OSR exit informed of the Phi'd field value.
                    m_insertionSet.insert(
                        0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                insertOSRHintsForUpdate(0, NodeOrigin(), availabilityCalculator.m_availability, identifier, phiDef->value());
            }

            if (verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                // Conditionally-added named properties start out as m_bottom.
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(m_graph, node->origin, m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        doLower = true;

                        m_insertionSet.insert(nodeIndex + 1,
                            location.createHint(m_graph, node->origin, nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                // Replace sunk allocations (and stores into them) with their
                // Phantom forms, or remove the node entirely.
                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                    case MaterializeNewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case CreateActivation:
                    case MaterializeCreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    default:
                        node->remove();
                        break;
                    }
                }

                // Redirect every edge of this node to the resolved value.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }
1769
1770
    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
1771
    {
1772
        // If we are currently pointing to a single local allocation,
1773
        // simply return the associated materialization.
1774
        if (Node* identifier = m_heap.follow(location))
1775
            return getMaterialization(block, identifier);
1776
1777
        if (Node* result = m_localMapping.get(location))
1778
            return result;
1779
1780
        // This implies that there is no local mapping. Find a non-local mapping.
1781
        SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
1782
            block, m_locationToVariable.get(location));
1783
        ASSERT(def);
1784
        ASSERT(def->value());
1785
1786
        Node* result = def->value();
1787
1788
        ASSERT(!result->replacement());
1789
1790
        m_localMapping.add(location, result);
1791
        return result;
1792
    }
1793
1794
    Node* resolve(BasicBlock* block, Node* node)
1795
    {
1796
        // If we are currently pointing to a single local allocation,
1797
        // simply return the associated materialization.
1798
        if (Node* identifier = m_heap.follow(node))
1799
            return getMaterialization(block, identifier);
1800
1801
        if (node->replacement())
1802
            node = node->replacement();
1803
        ASSERT(!node->replacement());
1804
1805
        return node;
1806
    }
1807
1808
    Node* getMaterialization(BasicBlock* block, Node* identifier)
1809
    {
1810
        ASSERT(m_heap.isAllocation(identifier));
1811
        if (!m_sinkCandidates.contains(identifier))
1812
            return identifier;
1813
1814
        if (Node* materialization = m_escapeeToMaterialization.get(identifier))
1815
            return materialization;
1816
1817
        SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
1818
            block, m_nodeToVariable.get(identifier));
1819
        ASSERT(def && def->value());
1820
        m_escapeeToMaterialization.add(identifier, def->value());
1821
        ASSERT(!def->value()->replacement());
1822
        return def->value();
1823
    }
1824
1825
    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, AvailabilityMap& availability, Node* escapee, Node* materialization)
1826
    {
1827
        // We need to follow() the value in the heap.
1828
        // Consider the following graph:
1829
        //
1830
        // Block #0
1831
        //   0: NewObject({})
1832
        //   1: NewObject({})
1833
        //   -: PutByOffset(@0, @1, x:0)
1834
        //   -: PutStructure(@0, {x:0})
1835
        //   2: GetByOffset(@0, x:0)
1836
        //   -: MovHint(@2, loc1)
1837
        //   -: Branch(#1, #2)
1838
        //
1839
        // Block #1
1840
        //   3: Call(f, @1)
1841
        //   4: Return(@0)
1842
        //
1843
        // Block #2
1844
        //   -: Return(undefined)
1845
        //
1846
        // We need to materialize @1 at @3, and when doing so we need
1847
        // to insert a MovHint for the materialization into loc1 as
1848
        // well.
1849
        // In order to do this, we say that we need to insert an
1850
        // update hint for any availability whose node resolve()s to
1851
        // the materialization.
1852
        for (auto entry : availability.m_heap) {
1853
            if (!entry.value.hasNode())
1854
                continue;
1855
            if (m_heap.follow(entry.value.node()) != escapee)
1856
                continue;
1857
1858
            m_insertionSet.insert(
1859
                nodeIndex, entry.key.createHint(m_graph, origin, materialization));
1860
        }
1861
1862
        for (unsigned i = availability.m_locals.size(); i--;) {
1863
            if (!availability.m_locals[i].hasNode())
1864
                continue;
1865
            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
1866
                continue;
1867
1868
            int operand = availability.m_locals.operandForIndex(i);
1869
            m_insertionSet.insertNode(
1870
                nodeIndex, SpecNone, MovHint, origin, OpInfo(operand),
1871
                materialization->defaultEdge());
1872
        }
1873
    }
1874
1875
    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
1876
    {
1877
        Allocation& allocation = m_heap.getAllocation(escapee);
1878
        switch (node->op()) {
1879
        case MaterializeNewObject: {
1880
            ObjectMaterializationData& data = node->objectMaterializationData();
1881
            unsigned firstChild = m_graph.m_varArgChildren.size();
1882
1883
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1884
1885
            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
1886
            ASSERT(locations.contains(structure));
1887
1888
            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));
1889
1890
            for (PromotedHeapLocation location : locations) {
1891
                switch (location.kind()) {
1892
                case StructurePLoc:
1893
                    ASSERT(location == structure);
1894
                    break;
1895
1896
                case NamedPropertyPLoc: {
1897
                    ASSERT(location.base() == allocation.identifier());
1898
                    data.m_properties.append(PhantomPropertyValue(location.info()));
1899
                    Node* value = resolve(block, location);
1900
                    if (m_sinkCandidates.contains(value))
1901
                        m_graph.m_varArgChildren.append(m_bottom);
1902
                    else
1903
                        m_graph.m_varArgChildren.append(value);
1904
                    break;
1905
                }
1906
1907
                default:
1908
                    DFG_CRASH(m_graph, node, "Bad location kind");
1909
                }
1910
            }
1911
1912
            node->children = AdjacencyList(
1913
                AdjacencyList::Variable,
1914
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
1915
            break;
1916
        }
1917
1918
        case MaterializeCreateActivation: {
1919
            ObjectMaterializationData& data = node->objectMaterializationData();
1920
1921
            unsigned firstChild = m_graph.m_varArgChildren.size();
1922
1923
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1924
1925
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
1926
            ASSERT(locations.contains(symbolTable));
1927
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
1928
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));
1929
1930
            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
1931
            ASSERT(locations.contains(scope));
1932
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
1933
1934
            for (PromotedHeapLocation location : locations) {
1935
                switch (location.kind()) {
1936
                case ActivationScopePLoc: {
1937
                    ASSERT(location == scope);
1938
                    break;
1939
                }
1940
1941
                case ActivationSymbolTablePLoc: {
1942
                    ASSERT(location == symbolTable);
1943
                    break;
1944
                }
1945
1946
                case ClosureVarPLoc: {
1947
                    ASSERT(location.base() == allocation.identifier());
1948
                    data.m_properties.append(PhantomPropertyValue(location.info()));
1949
                    Node* value = resolve(block, location);
1950
                    if (m_sinkCandidates.contains(value))
1951
                        m_graph.m_varArgChildren.append(m_bottom);
1952
                    else
1953
                        m_graph.m_varArgChildren.append(value);
1954
                    break;
1955
                }
1956
1957
                default:
1958
                    DFG_CRASH(m_graph, node, "Bad location kind");
1959
                }
1960
            }
1961
1962
            node->children = AdjacencyList(
1963
                AdjacencyList::Variable,
1964
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
1965
            break;
1966
        }
1967
1968
        case NewFunction: {
1969
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1970
            ASSERT(locations.size() == 2);
1971
1972
            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
1973
            ASSERT_UNUSED(executable, locations.contains(executable));
1974
1975
            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
1976
            ASSERT(locations.contains(activation));
1977
1978
            node->child1() = Edge(resolve(block, activation), KnownCellUse);
1979
            break;
1980
        }
1981
1982
        default:
1983
            DFG_CRASH(m_graph, node, "Bad materialize op");
1984
        }
1985
    }
1986
1987
    Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where)
1988
    {
1989
        if (verbose)
1990
            dataLog("Recovering ", location, " at ", where, "\n");
1991
        ASSERT(location.base()->isPhantomAllocation());
1992
        Node* base = getMaterialization(block, location.base());
1993
        Node* value = resolve(block, location);
1994
1995
        if (verbose)
1996
            dataLog("Base is ", base, " and value is ", value, "\n");
1997
1998
        if (base->isPhantomAllocation()) {
1999
            return PromotedHeapLocation(base, location.descriptor()).createHint(
2000
                m_graph,
2001
                NodeOrigin(
2002
                    base->origin.semantic,
2003
                    where->origin.forExit),
2004
                value);
2005
        }
2006
2007
        switch (location.kind()) {
2008
        case NamedPropertyPLoc: {
2009
            Allocation& allocation = m_heap.getAllocation(location.base());
2010
        
2011
            Vector<Structure*> structures;
2012
            structures.appendRange(allocation.structures().begin(), allocation.structures().end());
2013
            unsigned identifierNumber = location.info();
2014
            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2015
2016
            std::sort(
2017
                structures.begin(),
2018
                structures.end(),
2019
                [uid] (Structure *a, Structure* b) -> bool {
2020
                    return a->getConcurrently(uid) < b->getConcurrently(uid);
2021
                });
2022
2023
            PropertyOffset firstOffset = structures[0]->getConcurrently(uid);
2024
2025
            if (firstOffset == structures.last()->getConcurrently(uid)) {
2026
                Node* storage = base;
2027
                // FIXME: When we decide to sink objects with a
2028
                // property storage, we should handle non-inline offsets.
2029
                RELEASE_ASSERT(isInlineOffset(firstOffset));
2030
2031
                StorageAccessData* data = m_graph.m_storageAccessData.add();
2032
                data->offset = firstOffset;
2033
                data->identifierNumber = identifierNumber;
2034
2035
                return m_graph.addNode(
2036
                    SpecNone,
2037
                    PutByOffset,
2038
                    where->origin,
2039
                    OpInfo(data),
2040
                    Edge(storage, KnownCellUse),
2041
                    Edge(base, KnownCellUse),
2042
                    value->defaultEdge());
2043
            }
2044
2045
            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2046
            data->identifierNumber = identifierNumber;
2047
2048
            {
2049
                PropertyOffset currentOffset = firstOffset;
2050
                StructureSet currentSet;
2051
                for (Structure* structure : structures) {
2052
                    PropertyOffset offset = structure->getConcurrently(uid);
2053
                    if (offset != currentOffset) {
2054
                        data->variants.append(
2055
                            PutByIdVariant::replace(currentSet, currentOffset));
2056
                        currentOffset = offset;
2057
                        currentSet.clear();
2058
                    }
2059
                    currentSet.add(structure);
2060
                }
2061
                data->variants.append(PutByIdVariant::replace(currentSet, currentOffset));
2062
            }
2063
2064
            return m_graph.addNode(
2065
                SpecNone,
2066
                MultiPutByOffset,
2067
                NodeOrigin(
2068
                    base->origin.semantic,
2069
                    where->origin.forExit),
2070
                OpInfo(data),
2071
                Edge(base, KnownCellUse),
2072
                value->defaultEdge());
2073
            break;
2074
        }
2075
2076
        case ClosureVarPLoc: {
2077
            return m_graph.addNode(
2078
                SpecNone,
2079
                PutClosureVar,
2080
                NodeOrigin(
2081
                    base->origin.semantic,
2082
                    where->origin.forExit),
2083
                OpInfo(location.info()),
2084
                Edge(base, KnownCellUse),
2085
                value->defaultEdge());
2086
            break;
2087
        }
2088
2089
        default:
2090
            DFG_CRASH(m_graph, base, "Bad location kind");
2091
            break;
2092
        }
2093
    }
2094
2095
    SSACalculator m_pointerSSA;
2096
    SSACalculator m_allocationSSA;
2097
    HashSet<Node*> m_sinkCandidates;
2098
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
2099
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
2100
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
2101
    HashMap<Node*, Node*> m_escapeeToMaterialization;
2102
    InsertionSet m_insertionSet;
2103
    CombinedLiveness m_combinedLiveness;
2104
2105
    HashMap<Node*, Node*> m_materializationToEscapee;
2106
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
2107
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
2108
2109
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
2110
2111
    BlockMap<LocalHeap> m_heapAtHead;
2112
    BlockMap<LocalHeap> m_heapAtTail;
2113
    LocalHeap m_heap;
2114
2115
    Node* m_bottom = nullptr;
2116
};
2117
2118
}
2119
2120
bool performAllocationCycleSinking(Graph& graph)
2121
{
2122
    SamplingRegion samplingRegion("DFG Allocation Cycle Sinking Phase");
2123
    return runPhase<AllocationCycleSinkingPhase>(graph);
2124
}
2125
2126
} } // namespace JSC::DFG
2127
2128
#endif // ENABLE(DFG_JIT)
- a/Source/JavaScriptCore/dfg/DFGAllocationCycleSinkingPhase.h +48 lines
Line 0 a/Source/JavaScriptCore/dfg/DFGAllocationCycleSinkingPhase.h_sec1
1
/*
2
 * Copyright (C) 2015 Apple Inc. All rights reserved.
3
 *
4
 * Redistribution and use in source and binary forms, with or without
5
 * modification, are permitted provided that the following conditions
6
 * are met:
7
 * 1. Redistributions of source code must retain the above copyright
8
 *    notice, this list of conditions and the following disclaimer.
9
 * 2. Redistributions in binary form must reproduce the above copyright
10
 *    notice, this list of conditions and the following disclaimer in the
11
 *    documentation and/or other materials provided with the distribution.
12
 *
13
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24
 */
25
26
#ifndef DFGAllocationCycleSinkingPhase_h
27
#define DFGAllocationCycleSinkingPhase_h
28
29
#if ENABLE(DFG_JIT)
30
31
namespace JSC { namespace DFG {
32
33
class Graph;
34
35
// Eliminates allocations allocations that are never used except
36
// locally. This will insert phantom allocations and store hints so
37
// that OSR exit can materialize the objects. Replaces all uses of the
38
// objects' fields with SSA data flow. Contrary to the
39
// ObjectAllocationSinkingPhase, this phase is able to handle cyclic
40
// allocation graphs.
41
42
bool performAllocationCycleSinking(Graph&);
43
44
} } // namespace JSC::DFG
45
46
#endif // ENABLE(DFG_JIT)
47
48
#endif // DFGObjectCycleSinkingPhase_h
- a/Source/JavaScriptCore/dfg/DFGGraph.h -1 lines
Lines 325-331 public: a/Source/JavaScriptCore/dfg/DFGGraph.h_sec1
325
    
325
    
326
    StructureSet* addStructureSet(const StructureSet& structureSet)
326
    StructureSet* addStructureSet(const StructureSet& structureSet)
327
    {
327
    {
328
        ASSERT(structureSet.size());
329
        m_structureSet.append(structureSet);
328
        m_structureSet.append(structureSet);
330
        return &m_structureSet.last();
329
        return &m_structureSet.last();
331
    }
330
    }
- a/Source/JavaScriptCore/dfg/DFGLazyNode.cpp -2 / +1 lines
Lines 38-45 void LazyNode::dump(PrintStream& out) const a/Source/JavaScriptCore/dfg/DFGLazyNode.cpp_sec1
38
        if (isNode())
38
        if (isNode())
39
            out.print("LazyNode:@", asNode()->index());
39
            out.print("LazyNode:@", asNode()->index());
40
        else
40
        else
41
            out.print("LazyNode:FrozenValue:", Graph::opName(op()), ", ", pointerDump(asValue()));
41
            out.print("LazyNode:FrozenValue(", Graph::opName(op()), ", ", pointerDump(asValue()), ")");
42
        out.print(")");
43
    }
42
    }
44
}
43
}
45
44
- a/Source/JavaScriptCore/dfg/DFGNode.h -8 / +3 lines
Lines 1293-1305 struct Node { a/Source/JavaScriptCore/dfg/DFGNode.h_sec1
1293
    FrozenValue* cellOperand()
1293
    FrozenValue* cellOperand()
1294
    {
1294
    {
1295
        ASSERT(hasCellOperand());
1295
        ASSERT(hasCellOperand());
1296
        switch (op()) {
1296
        return reinterpret_cast<FrozenValue*>(m_opInfo);
1297
        case MaterializeCreateActivation:
1298
            return reinterpret_cast<FrozenValue*>(m_opInfo2);
1299
        default:
1300
            return reinterpret_cast<FrozenValue*>(m_opInfo);
1301
        }
1302
        RELEASE_ASSERT_NOT_REACHED();
1303
    }
1297
    }
1304
    
1298
    
1305
    template<typename T>
1299
    template<typename T>
Lines 1359-1364 struct Node { a/Source/JavaScriptCore/dfg/DFGNode.h_sec2
1359
        switch (op()) {
1353
        switch (op()) {
1360
        case CheckStructure:
1354
        case CheckStructure:
1361
        case CheckStructureImmediate:
1355
        case CheckStructureImmediate:
1356
        case MaterializeNewObject:
1362
            return true;
1357
            return true;
1363
        default:
1358
        default:
1364
            return false;
1359
            return false;
Lines 1444-1450 struct Node { a/Source/JavaScriptCore/dfg/DFGNode.h_sec3
1444
    ObjectMaterializationData& objectMaterializationData()
1439
    ObjectMaterializationData& objectMaterializationData()
1445
    {
1440
    {
1446
        ASSERT(hasObjectMaterializationData());
1441
        ASSERT(hasObjectMaterializationData());
1447
        return *reinterpret_cast<ObjectMaterializationData*>(m_opInfo);
1442
        return *reinterpret_cast<ObjectMaterializationData*>(m_opInfo2);
1448
    }
1443
    }
1449
1444
1450
    bool isObjectAllocation()
1445
    bool isObjectAllocation()
- a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp -1 lines
Lines 32-38 a/Source/JavaScriptCore/dfg/DFGOSRAvailabilityAnalysisPhase.cpp_sec1
32
#include "DFGGraph.h"
32
#include "DFGGraph.h"
33
#include "DFGInsertionSet.h"
33
#include "DFGInsertionSet.h"
34
#include "DFGPhase.h"
34
#include "DFGPhase.h"
35
#include "DFGPromoteHeapAccess.h"
36
#include "JSCInlines.h"
35
#include "JSCInlines.h"
37
36
38
namespace JSC { namespace DFG {
37
namespace JSC { namespace DFG {
- a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp -10 / +33 lines
Lines 38-43 a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp_sec1
38
#include "DFGLivenessAnalysisPhase.h"
38
#include "DFGLivenessAnalysisPhase.h"
39
#include "DFGOSRAvailabilityAnalysisPhase.h"
39
#include "DFGOSRAvailabilityAnalysisPhase.h"
40
#include "DFGPhase.h"
40
#include "DFGPhase.h"
41
#include "DFGPhiChildren.h"
41
#include "DFGPromoteHeapAccess.h"
42
#include "DFGPromoteHeapAccess.h"
42
#include "DFGSSACalculator.h"
43
#include "DFGSSACalculator.h"
43
#include "DFGValidate.h"
44
#include "DFGValidate.h"
Lines 94-99 public: a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp_sec2
94
        
95
        
95
        while (performSinking()) { }
96
        while (performSinking()) { }
96
        
97
        
98
        fillStructureSets();
99
97
        if (verbose) {
100
        if (verbose) {
98
            dataLog("Graph after sinking:\n");
101
            dataLog("Graph after sinking:\n");
99
            m_graph.dump();
102
            m_graph.dump();
Lines 616-629 private: a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp_sec3
616
                    if (m_sinkCandidates.contains(node)) {
619
                    if (m_sinkCandidates.contains(node)) {
617
                        m_insertionSet.insert(
620
                        m_insertionSet.insert(
618
                            nodeIndex + 1,
621
                            nodeIndex + 1,
619
                            PromotedHeapLocation(ActivationScopePLoc, node).createHint(
622
                            PromotedHeapLocation(ActivationSymbolTablePLoc, node).createHint(
620
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
623
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
621
                        Node* symbolTableNode = m_insertionSet.insertConstant(
622
                            nodeIndex + 1, node->origin, node->cellOperand());
623
                        m_insertionSet.insert(
624
                        m_insertionSet.insert(
624
                            nodeIndex + 1,
625
                            nodeIndex + 1,
625
                            PromotedHeapLocation(ActivationSymbolTablePLoc, node).createHint(
626
                            PromotedHeapLocation(ActivationScopePLoc, node).createHint(
626
                                m_graph, node->origin, symbolTableNode));
627
                                m_graph, node->origin, m_graph.varArgChild(node, 1).node()));
627
                        ObjectMaterializationData& data = node->objectMaterializationData();
628
                        ObjectMaterializationData& data = node->objectMaterializationData();
628
                        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
629
                        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
629
                            unsigned identifierNumber = data.m_properties[i].m_identifierNumber;
630
                            unsigned identifierNumber = data.m_properties[i].m_identifierNumber;
Lines 632-638 private: a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp_sec4
632
                                PromotedHeapLocation(
633
                                PromotedHeapLocation(
633
                                    ClosureVarPLoc, node, identifierNumber).createHint(
634
                                    ClosureVarPLoc, node, identifierNumber).createHint(
634
                                    m_graph, node->origin,
635
                                    m_graph, node->origin,
635
                                    m_graph.varArgChild(node, i + 1).node()));
636
                                    m_graph.varArgChild(node, i + 2).node()));
636
                        }
637
                        }
637
                        node->convertToPhantomCreateActivation();
638
                        node->convertToPhantomCreateActivation();
638
                    }
639
                    }
Lines 831-836 private: a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp_sec5
831
        return def->value();
832
        return def->value();
832
    }
833
    }
833
834
835
    void fillStructureSets()
836
    {
837
        PhiChildren phiChildren(m_graph);
838
839
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
840
            for (Node* node : *block) {
841
                if (node->op() != MaterializeNewObject)
842
                    continue;
843
844
                StructureSet& structures = node->structureSet();
845
                phiChildren.forAllTransitiveIncomingValues(
846
                    m_graph.varArgChild(node, 0).node(),
847
                    [&] (Node* structure) {
848
                        structures.add(structure->castConstant<Structure*>());
849
                    });
850
            }
851
        }
852
    }
853
834
    template<typename SinkCandidateFunctor, typename EscapeFunctor>
854
    template<typename SinkCandidateFunctor, typename EscapeFunctor>
835
    void handleNode(
855
    void handleNode(
836
        Node* node,
856
        Node* node,
Lines 964-976 private: a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp_sec6
964
        case NewObject:
984
        case NewObject:
965
        case MaterializeNewObject: {
985
        case MaterializeNewObject: {
966
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
986
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
987
            StructureSet* structures = m_graph.addStructureSet(StructureSet());
967
            
988
            
968
            result = m_graph.addNode(
989
            result = m_graph.addNode(
969
                escapee->prediction(), Node::VarArg, MaterializeNewObject,
990
                escapee->prediction(), Node::VarArg, MaterializeNewObject,
970
                NodeOrigin(
991
                NodeOrigin(
971
                    escapee->origin.semantic,
992
                    escapee->origin.semantic,
972
                    where->origin.forExit),
993
                    where->origin.forExit),
973
                OpInfo(data), OpInfo(), 0, 0);
994
                OpInfo(structures), OpInfo(data), 0, 0);
974
            break;
995
            break;
975
        }
996
        }
976
997
Lines 993-999 private: a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp_sec7
993
                NodeOrigin(
1014
                NodeOrigin(
994
                    escapee->origin.semantic,
1015
                    escapee->origin.semantic,
995
                    where->origin.forExit),
1016
                    where->origin.forExit),
996
                OpInfo(data), OpInfo(symbolTable), 0, 0);
1017
                OpInfo(symbolTable), OpInfo(data), 0, 0);
997
            break;
1018
            break;
998
        }
1019
        }
999
1020
Lines 1063-1072 private: a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp_sec8
1063
1084
1064
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1085
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
1065
1086
1066
            PromotedHeapLocation scope(ActivationScopePLoc, escapee);
1067
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, escapee);
1087
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, escapee);
1068
            ASSERT(locations.contains(scope));
1088
            ASSERT(locations.contains(symbolTable));
1089
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));
1069
1090
1091
            PromotedHeapLocation scope(ActivationScopePLoc, escapee);
1092
            ASSERT(locations.contains(scope));
1070
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
1093
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));
1071
1094
1072
            for (unsigned i = 0; i < locations.size(); ++i) {
1095
            for (unsigned i = 0; i < locations.size(); ++i) {
- a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp.orig +1165 lines
Line 0 a/Source/JavaScriptCore/dfg/DFGObjectAllocationSinkingPhase.cpp.orig_sec1
1
/*
2
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
3
 *
4
 * Redistribution and use in source and binary forms, with or without
5
 * modification, are permitted provided that the following conditions
6
 * are met:
7
 * 1. Redistributions of source code must retain the above copyright
8
 *    notice, this list of conditions and the following disclaimer.
9
 * 2. Redistributions in binary form must reproduce the above copyright
10
 *    notice, this list of conditions and the following disclaimer in the
11
 *    documentation and/or other materials provided with the distribution.
12
 *
13
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24
 */
25
26
#include "config.h"
27
#include "DFGObjectAllocationSinkingPhase.h"
28
29
#if ENABLE(DFG_JIT)
30
31
#include "DFGAbstractHeap.h"
32
#include "DFGBlockMapInlines.h"
33
#include "DFGClobberize.h"
34
#include "DFGCombinedLiveness.h"
35
#include "DFGGraph.h"
36
#include "DFGInsertOSRHintsForUpdate.h"
37
#include "DFGInsertionSet.h"
38
#include "DFGLivenessAnalysisPhase.h"
39
#include "DFGOSRAvailabilityAnalysisPhase.h"
40
#include "DFGPhase.h"
41
#include "DFGPromoteHeapAccess.h"
42
#include "DFGSSACalculator.h"
43
#include "DFGValidate.h"
44
#include "JSCInlines.h"
45
46
namespace JSC { namespace DFG {
47
48
static bool verbose = false;
49
50
class ObjectAllocationSinkingPhase : public Phase {
51
public:
52
    // Constructs the phase over the given graph. The SSACalculator and
    // InsertionSet members are bound to the same graph and reused across
    // sinking iterations (see performSinking()).
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation sinking")
        , m_ssaCalculator(graph)
        , m_insertionSet(graph)
    {
    }
58
    
59
    bool run()
60
    {
61
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
62
        
63
        m_graph.m_dominators.computeIfNecessary(m_graph);
64
        
65
        // Logically we wish to consider every NewObject and sink it. However it's probably not
66
        // profitable to sink a NewObject that will always escape. So, first we do a very simple
67
        // forward flow analysis that determines the set of NewObject nodes that have any chance
68
        // of benefiting from object allocation sinking. Then we fixpoint the following rules:
69
        //
70
        // - For each NewObject, we turn the original NewObject into a PhantomNewObject and then
71
        //   we insert MaterializeNewObject just before those escaping sites that come before any
72
        //   other escaping sites - that is, there is no path between the allocation and those sites
73
        //   that would see any other escape. Note that Upsilons constitute escaping sites. Then we
74
        //   insert additional MaterializeNewObject nodes on Upsilons that feed into Phis that mix
75
        //   materializations and the original PhantomNewObject. We then turn each PutByOffset over a
76
        //   PhantomNewObject into a PutHint.
77
        //
78
        // - We perform the same optimization for MaterializeNewObject. This allows us to cover
79
        //   cases where we had MaterializeNewObject flowing into a PutHint.
80
        //
81
        // We could also add this rule:
82
        //
83
        // - If all of the Upsilons of a Phi have a MaterializeNewObject that isn't used by anyone
84
        //   else, then replace the Phi with the MaterializeNewObject.
85
        //
86
        //   FIXME: Implement this. Note that this totally doable but it requires some gnarly
87
        //   code, and to be effective the pruner needs to be aware of it. Currently any Upsilon
88
        //   is considered to be an escape even by the pruner, so it's unlikely that we'll see
89
        //   many cases of Phi over Materializations.
90
        //   https://bugs.webkit.org/show_bug.cgi?id=136927
91
        
92
        if (!performSinking())
93
            return false;
94
        
95
        while (performSinking()) { }
96
        
97
        if (verbose) {
98
            dataLog("Graph after sinking:\n");
99
            m_graph.dump();
100
        }
101
        
102
        return true;
103
    }
104
105
private:
106
    // Runs one full sinking iteration. Returns true if the graph was changed
    // (i.e. at least one sink candidate was found and processed), false if
    // there was nothing to sink. Called repeatedly from run() until fixpoint.
    //
    // NOTE: the analysis steps below are order-dependent: refcounts, liveness,
    // and OSR availability must all be fresh before determineMaterializationPoints()
    // reads m_combinedLiveness.
    bool performSinking()
    {
        m_graph.computeRefCounts();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);
        
        // Snapshot the graph text up front so validation failures can show a
        // before/after diff. Only paid for when both options are enabled.
        CString graphBeforeSinking;
        if (Options::verboseValidationFailure() && Options::validateGraphAtEachPhase()) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }
        
        if (verbose) {
            dataLog("Graph before sinking:\n");
            m_graph.dump();
        }
        
        determineMaterializationPoints();
        if (m_sinkCandidates.isEmpty())
            return false;
        
        // At this point we are committed to sinking the sinking candidates.
        placeMaterializationPoints();
        lowerNonReadingOperationsOnPhantomAllocations();
        promoteSunkenFields();
        
        if (Options::validateGraphAtEachPhase())
            validate(m_graph, DumpGraph, graphBeforeSinking);
        
        if (verbose)
            dataLog("Sinking iteration changed the graph.\n");
        return true;
    }
141
    
142
    // Computes m_sinkCandidates (the allocations worth sinking) and records,
    // via createMaterialize(), where each sunk allocation must be rematerialized.
    // Populates m_materializationToEscapee and m_materializationSiteToMaterializations.
    void determineMaterializationPoints()
    {
        // The premise of this pass is that if there exists a point in the program where some
        // path from a phantom allocation site to that point causes materialization, then *all*
        // paths cause materialization. This should mean that there are never any redundant
        // materializations.
        
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        
        // For each block: allocation node -> "has it been forced to materialize
        // by this program point?" (false = still phantom).
        BlockMap<HashMap<Node*, bool>> materializedAtHead(m_graph);
        BlockMap<HashMap<Node*, bool>> materializedAtTail(m_graph);
        
        // Forward dataflow fixpoint: propagate materialization state through
        // each block and into successors until nothing changes.
        bool changed;
        do {
            if (verbose)
                dataLog("Doing iteration of materialization point placement.\n");
            changed = false;
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                HashMap<Node*, bool> materialized = materializedAtHead[block];
                if (verbose)
                    dataLog("    Looking at block ", pointerDump(block), "\n");
                for (Node* node : *block) {
                    // handleNode invokes the first lambda when `node` is a sinkable
                    // allocation, and the second for each allocation that `node`
                    // causes to escape.
                    handleNode(
                        node,
                        [&] () {
                            materialized.add(node, false);
                        },
                        [&] (Node* escapee) {
                            auto iter = materialized.find(escapee);
                            if (iter != materialized.end()) {
                                if (verbose)
                                    dataLog("    ", escapee, " escapes at ", node, "\n");
                                iter->value = true;
                            }
                        });
                }
                
                if (verbose)
                    dataLog("    Materialized at tail of ", pointerDump(block), ": ", mapDump(materialized), "\n");
                
                if (materialized == materializedAtTail[block])
                    continue;
                
                // Note: the tail map is stored *before* the liveness pruning
                // below; only the propagation to successors is pruned.
                materializedAtTail[block] = materialized;
                changed = true;
                
                // Only propagate things to our successors if they are alive in all successors.
                // So, we prune materialized-at-tail to only include things that are live.
                Vector<Node*> toRemove;
                for (auto pair : materialized) {
                    if (!m_combinedLiveness.liveAtTail[block].contains(pair.key))
                        toRemove.append(pair.key);
                }
                for (Node* key : toRemove)
                    materialized.remove(key);
                
                // Merge into successors: "materialized" is sticky (|=) once any
                // predecessor path materializes the allocation.
                for (BasicBlock* successorBlock : block->successors()) {
                    for (auto pair : materialized) {
                        materializedAtHead[successorBlock].add(
                            pair.key, false).iterator->value |= pair.value;
                    }
                }
            }
        } while (changed);
        
        // Determine the sink candidates. Broadly, a sink candidate is a node that handleNode()
        // believes is sinkable, and one of the following is true:
        //
        // 1) There exists a basic block with only backward outgoing edges (or no outgoing edges)
        //    in which the node wasn't materialized. This is meant to catch effectively-infinite
        //    loops in which we don't need to have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node is not materialized and the
        //    node is dead.
        //
        // 3) The sum of execution counts of the materializations is less than the sum of
        //    execution counts of the original node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto pair : materializedAtTail[block]) {
                if (pair.value)
                    continue; // It was materialized.
                
                if (m_combinedLiveness.liveAtTail[block].contains(pair.key))
                    continue; // It might still get materialized in all of the successors.
                
                // We know that it died in this block and it wasn't materialized. That means that
                // if we sink this allocation, then *this* will be a path along which we never
                // have to allocate. Profit!
                m_sinkCandidates.add(pair.key);
            }
        }
        
        if (m_sinkCandidates.isEmpty())
            return;
        
        // A materialization edge exists at any point where a node escapes but hasn't been
        // materialized yet. We do this in two parts. First we find all of the nodes that cause
        // escaping to happen, where the escapee had not yet been materialized. This catches
        // everything but loops. We then catch loops - as well as weirder control flow constructs -
        // in a subsequent pass that looks at places in the CFG where an edge exists from a block
        // that hasn't materialized to a block that has. We insert the materialization along such an
        // edge, and we rely on the fact that critical edges were already broken so that we actually
        // either insert the materialization at the head of the successor or the tail of the
        // predecessor.
        //
        // FIXME: This can create duplicate allocations when we really only needed to perform one.
        // For example:
        //
        //     var o = new Object();
        //     if (rare) {
        //         if (stuff)
        //             call(o); // o escapes here.
        //         return;
        //     }
        //     // o doesn't escape down here.
        //
        // In this example, we would place a materialization point at call(o) and then we would find
        // ourselves having to insert another one at the implicit else case of that if statement
        // ('cause we've broken critical edges). We would instead really like to just have one
        // materialization point right at the top of the then case of "if (rare)". To do this, we can
        // find the LCA of the various materializations in the dom tree.
        // https://bugs.webkit.org/show_bug.cgi?id=137124
        
        // First pass: find intra-block materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // Seed with candidates already materialized at block head so we
            // don't re-materialize them inside this block.
            HashSet<Node*> materialized;
            for (auto pair : materializedAtHead[block]) {
                if (pair.value && m_sinkCandidates.contains(pair.key))
                    materialized.add(pair.key);
            }
            
            if (verbose)
                dataLog("Placing materialization points in ", pointerDump(block), " with materialization set ", listDump(materialized), "\n");
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                
                handleNode(
                    node,
                    [&] () { },
                    [&] (Node* escapee) {
                        if (verbose)
                            dataLog("Seeing ", escapee, " escape at ", node, "\n");
                        
                        if (!m_sinkCandidates.contains(escapee)) {
                            if (verbose)
                                dataLog("    It's not a sink candidate.\n");
                            return;
                        }
                        
                        if (!materialized.add(escapee).isNewEntry) {
                            if (verbose)
                                dataLog("   It's already materialized.\n");
                            return;
                        }
                        
                        // First escape of this candidate in this block:
                        // materialize right before the escaping node.
                        createMaterialize(escapee, node);
                    });
            }
        }
        
        // Second pass: find CFG edge materialization points.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (BasicBlock* successorBlock : block->successors()) {
                for (auto pair : materializedAtHead[successorBlock]) {
                    Node* allocation = pair.key;
                    
                    // We only care if it is materialized in the successor.
                    if (!pair.value)
                        continue;
                    
                    // We only care about sinking candidates.
                    if (!m_sinkCandidates.contains(allocation))
                        continue;
                    
                    // We only care if it isn't materialized in the predecessor.
                    if (materializedAtTail[block].get(allocation))
                        continue;
                    
                    // We need to decide if we put the materialization at the head of the successor,
                    // or the tail of the predecessor. It needs to be placed so that the allocation
                    // is never materialized before, and always materialized after.
                    
                    // Is it never materialized in any of successor's predecessors? I like to think
                    // of "successors' predecessors" and "predecessor's successors" as the "shadow",
                    // because of what it looks like when you draw it.
                    bool neverMaterializedInShadow = true;
                    for (BasicBlock* shadowBlock : successorBlock->predecessors) {
                        if (materializedAtTail[shadowBlock].get(allocation)) {
                            neverMaterializedInShadow = false;
                            break;
                        }
                    }
                    
                    if (neverMaterializedInShadow) {
                        createMaterialize(allocation, successorBlock->firstOriginNode());
                        continue;
                    }
                    
                    // Because we had broken critical edges, it must be the case that the
                    // predecessor's successors all materialize the object. This is because the
                    // previous case is guaranteed to catch the case where the successor only has
                    // one predecessor. When critical edges are broken, this is also the only case
                    // where the predecessor could have had multiple successors. Therefore we have
                    // already handled the case where the predecessor has multiple successors.
                    DFG_ASSERT(m_graph, block, block->numSuccessors() == 1);
                    
                    createMaterialize(allocation, block->terminal());
                }
            }
        }
    }
362
    
363
    // Inserts the materialization nodes recorded by determineMaterializationPoints()
    // into the graph, builds the SSA form over them (Phis and Upsilons), rewires
    // uses of each sunk allocation to the reaching materialization, and emits
    // OSR hints so exit sites still see the allocation's state.
    void placeMaterializationPoints()
    {
        m_ssaCalculator.reset();
        
        // The "variables" are the object allocations that we are sinking. So, nodeToVariable maps
        // sink candidates (aka escapees) to the SSACalculator's notion of Variable, and indexToNode
        // maps in the opposite direction using the SSACalculator::Variable::index() as the key.
        HashMap<Node*, SSACalculator::Variable*> nodeToVariable;
        Vector<Node*> indexToNode;
        
        for (Node* node : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
            nodeToVariable.add(node, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(node);
        }
        
        // Each original allocation and each materialization of it counts as a
        // def of the corresponding SSA variable.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                if (SSACalculator::Variable* variable = nodeToVariable.get(node))
                    m_ssaCalculator.newDef(variable, block, node);
                
                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    m_ssaCalculator.newDef(
                        nodeToVariable.get(m_materializationToEscapee.get(materialize)),
                        block, materialize);
                }
            }
        }
        
        // Ask the SSA calculator where Phis are needed; skip blocks where the
        // allocation isn't live at head (returning nullptr suppresses the Phi).
        m_ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* allocation = indexToNode[variable->index()];
                if (!m_combinedLiveness.liveAtHead[block].contains(allocation))
                    return nullptr;
                
                Node* phiNode = m_graph.addNode(allocation->prediction(), Phi, NodeOrigin());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });
        
        // Place Phis in the right places. Replace all uses of any allocation with the appropriate
        // materialization. Create the appropriate Upsilon nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // mapping: allocation -> the node currently standing in for it
            // (a reaching def, Phi, or freshly inserted materialization).
            HashMap<Node*, Node*> mapping;
            
            for (Node* candidate : m_combinedLiveness.liveAtHead[block]) {
                SSACalculator::Variable* variable = nodeToVariable.get(candidate);
                if (!variable)
                    continue;
                
                SSACalculator::Def* def = m_ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;
                
                mapping.set(indexToNode[variable->index()], def->value());
            }
            
            availabilityCalculator.beginBlock(block);
            // Insert this block's Phis at index 0, with OSR hints tying them
            // back to the original allocation.
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
                m_insertionSet.insert(0, phiDef->value());
                
                Node* originalNode = indexToNode[phiDef->variable()->index()];
                insertOSRHintsForUpdate(
                    m_insertionSet, 0, NodeOrigin(), availabilityCalculator.m_availability,
                    originalNode, phiDef->value());

                mapping.set(originalNode, phiDef->value());
            }
            
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Materializations scheduled at this node go in just before it.
                for (Node* materialize : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialize);
                    m_insertionSet.insert(nodeIndex, materialize);
                    insertOSRHintsForUpdate(
                        m_insertionSet, nodeIndex, node->origin,
                        availabilityCalculator.m_availability, escapee, materialize);
                    mapping.set(escapee, materialize);
                }
                    
                availabilityCalculator.executeNode(node);
                
                // Redirect this node's uses of any sunk allocation to the
                // current stand-in.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (Node* materialize = mapping.get(edge.node()))
                            edge.setNode(materialize);
                    });
                
                // If you cause an escape, you shouldn't see the original escapee.
                if (validationEnabled()) {
                    handleNode(
                        node,
                        [&] () { },
                        [&] (Node* escapee) {
                            DFG_ASSERT(m_graph, node, !m_sinkCandidates.contains(escapee));
                        });
                }
            }
            
            // Feed successors' Phis: insert an Upsilon before the terminal for
            // each Phi in each successor block.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            Node* upsilonWhere = terminal.node;
            NodeOrigin upsilonOrigin = upsilonWhere->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* allocation = indexToNode[variable->index()];
                    
                    Node* incoming = mapping.get(allocation);
                    DFG_ASSERT(m_graph, incoming, incoming != allocation);
                    
                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }
            
            m_insertionSet.execute(block);
        }
        
        // At this point we have dummy materialization nodes along with edges to them. This means
        // that the part of the control flow graph that prefers to see actual object allocations
        // is completely fixed up, except for the materializations themselves.
    }
492
    
493
    // Converts every non-reading operation on a sunk (phantom) allocation into
    // its hint form, and converts the allocations themselves into their Phantom
    // variants, emitting PromotedHeapLocation hints that record the values the
    // phantom object logically holds.
    void lowerNonReadingOperationsOnPhantomAllocations()
    {
        // Lower everything but reading operations on phantom allocations. We absolutely have to
        // lower all writes so as to reveal them to the SSA calculator. We cannot lower reads
        // because the whole point is that those go away completely.
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case PutByOffset: {
                    // child2 is the base object of a PutByOffset.
                    Node* target = node->child2().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomObjectAllocation());
                        node->convertToPutByOffsetHint();
                    }
                    break;
                }

                case PutClosureVar: {
                    Node* target = node->child1().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomActivationAllocation());
                        node->convertToPutClosureVarHint();
                    }
                    break;
                }
                    
                case PutStructure: {
                    Node* target = node->child1().node();
                    if (m_sinkCandidates.contains(target)) {
                        ASSERT(target->isPhantomObjectAllocation());
                        // The hint records the post-transition structure as a constant.
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex, node->origin, JSValue(node->transition()->next));
                        node->convertToPutStructureHint(structure);
                    }
                    break;
                }
                    
                case NewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        // Hints go at nodeIndex + 1 so they come after the
                        // (soon-to-be-phantom) allocation itself.
                        Node* structure = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, JSValue(node->structure()));
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, structure));
                        node->convertToPhantomNewObject();
                    }
                    break;
                }
                    
                case MaterializeNewObject: {
                    if (m_sinkCandidates.contains(node)) {
                        // varArgChild(0) is the structure; children 1..n carry
                        // the property values described by the materialization data.
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(StructurePLoc, node).createHint(
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
                        for (unsigned i = 0; i < node->objectMaterializationData().m_properties.size(); ++i) {
                            unsigned identifierNumber =
                                node->objectMaterializationData().m_properties[i].m_identifierNumber;
                            m_insertionSet.insert(
                                nodeIndex + 1,
                                PromotedHeapLocation(
                                    NamedPropertyPLoc, node, identifierNumber).createHint(
                                    m_graph, node->origin,
                                    m_graph.varArgChild(node, i + 1).node()));
                        }
                        node->convertToPhantomNewObject();
                    }
                    break;
                }

                case NewFunction: {
                    if (m_sinkCandidates.contains(node)) {
                        Node* executable = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, node->cellOperand());
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(FunctionExecutablePLoc, node).createHint(
                                m_graph, node->origin, executable));
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(FunctionActivationPLoc, node).createHint(
                                m_graph, node->origin, node->child1().node()));
                        node->convertToPhantomNewFunction();
                    }
                    break;
                }

                case CreateActivation: {
                    if (m_sinkCandidates.contains(node)) {
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationScopePLoc, node).createHint(
                                m_graph, node->origin, node->child1().node()));
                        Node* symbolTableNode = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, node->cellOperand());
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationSymbolTablePLoc, node).createHint(
                                m_graph, node->origin, symbolTableNode));

                        {
                            // A fresh activation's closure vars all start out
                            // undefined; record that for every slot in the
                            // symbol table. The lock guards concurrent JIT
                            // access to the table during iteration.
                            SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
                            Node* undefined = m_insertionSet.insertConstant(
                                nodeIndex + 1, node->origin, jsUndefined());
                            ConcurrentJITLocker locker(symbolTable->m_lock);
                            for (auto iter = symbolTable->begin(locker), end = symbolTable->end(locker); iter != end; ++iter) {
                                m_insertionSet.insert(
                                    nodeIndex + 1,
                                    PromotedHeapLocation(
                                        ClosureVarPLoc, node, iter->value.scopeOffset().offset()).createHint(
                                        m_graph, node->origin, undefined));
                            }
                        }

                        node->convertToPhantomCreateActivation();
                    }
                    break;
                }

                case MaterializeCreateActivation: {
                    if (m_sinkCandidates.contains(node)) {
                        // varArgChild(0) is the scope; children 1..n carry the
                        // closure-var values from the materialization data.
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationScopePLoc, node).createHint(
                                m_graph, node->origin, m_graph.varArgChild(node, 0).node()));
                        Node* symbolTableNode = m_insertionSet.insertConstant(
                            nodeIndex + 1, node->origin, node->cellOperand());
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            PromotedHeapLocation(ActivationSymbolTablePLoc, node).createHint(
                                m_graph, node->origin, symbolTableNode));
                        ObjectMaterializationData& data = node->objectMaterializationData();
                        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
                            unsigned identifierNumber = data.m_properties[i].m_identifierNumber;
                            m_insertionSet.insert(
                                nodeIndex + 1,
                                PromotedHeapLocation(
                                    ClosureVarPLoc, node, identifierNumber).createHint(
                                    m_graph, node->origin,
                                    m_graph.varArgChild(node, i + 1).node()));
                        }
                        node->convertToPhantomCreateActivation();
                    }
                    break;
                }

                case StoreBarrier: {
                    // Store barriers are expected to have been handled before
                    // this phase runs; seeing one here is a phase-ordering bug.
                    DFG_CRASH(m_graph, node, "Unexpected store barrier during sinking.");
                    break;
                }
                    
                default:
                    break;
                }
                
                // Any remaining use of a sink candidate is known to be a cell;
                // mark it so to avoid redundant checks.
                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        if (m_sinkCandidates.contains(edge.node()))
                            edge.setUseKind(KnownCellUse);
                    });
            }
            m_insertionSet.execute(block);
        }
    }
661
    
662
    void promoteSunkenFields()
663
    {
664
        // Collect the set of heap locations that we will be operating over.
665
        HashSet<PromotedHeapLocation> locations;
666
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
667
            for (Node* node : *block) {
668
                promoteHeapAccess(
669
                    node,
670
                    [&] (PromotedHeapLocation location, Edge) {
671
                        if (m_sinkCandidates.contains(location.base()))
672
                            locations.add(location);
673
                    },
674
                    [&] (PromotedHeapLocation location) {
675
                        if (m_sinkCandidates.contains(location.base()))
676
                            locations.add(location);
677
                    });
678
            }
679
        }
680
        
681
        // Figure out which locations belong to which allocations.
682
        m_locationsForAllocation.clear();
683
        for (PromotedHeapLocation location : locations) {
684
            auto result = m_locationsForAllocation.add(location.base(), Vector<PromotedHeapLocation>());
685
            ASSERT(!result.iterator->value.contains(location));
686
            result.iterator->value.append(location);
687
        }
688
        
689
        // For each sunken thingy, make sure we create Bottom values for all of its fields.
690
        // Note that this has the hilarious slight inefficiency of creating redundant hints for
691
        // things that were previously materializations. This should only impact compile times and
692
        // not code quality, and it's necessary for soundness without some data structure hackage.
693
        // For example, a MaterializeNewObject that we choose to sink may have new fields added to
694
        // it conditionally. That would necessitate Bottoms.
695
        Node* bottom = nullptr;
696
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
697
            if (block == m_graph.block(0))
698
                bottom = m_insertionSet.insertConstant(0, NodeOrigin(), jsUndefined());
699
            
700
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
701
                Node* node = block->at(nodeIndex);
702
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
703
                    m_insertionSet.insert(
704
                        nodeIndex + 1, location.createHint(m_graph, node->origin, bottom));
705
                }
706
            }
707
            m_insertionSet.execute(block);
708
        }
709
710
        m_ssaCalculator.reset();
711
712
        // Collect the set of "variables" that we will be sinking.
713
        m_locationToVariable.clear();
714
        m_indexToLocation.clear();
715
        for (PromotedHeapLocation location : locations) {
716
            SSACalculator::Variable* variable = m_ssaCalculator.newVariable();
717
            m_locationToVariable.add(location, variable);
718
            ASSERT(m_indexToLocation.size() == variable->index());
719
            m_indexToLocation.append(location);
720
        }
721
        
722
        // Create Defs from the existing hints.
723
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
724
            for (Node* node : *block) {
725
                promoteHeapAccess(
726
                    node,
727
                    [&] (PromotedHeapLocation location, Edge value) {
728
                        if (!m_sinkCandidates.contains(location.base()))
729
                            return;
730
                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
731
                        m_ssaCalculator.newDef(variable, block, value.node());
732
                    },
733
                    [&] (PromotedHeapLocation) { });
734
            }
735
        }
736
        
737
        // OMG run the SSA calculator to create Phis!
738
        m_ssaCalculator.computePhis(
739
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
740
                PromotedHeapLocation location = m_indexToLocation[variable->index()];
741
                if (!m_combinedLiveness.liveAtHead[block].contains(location.base()))
742
                    return nullptr;
743
                
744
                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, NodeOrigin());
745
                phiNode->mergeFlags(NodeResultJS);
746
                return phiNode;
747
            });
748
        
749
        // Place Phis in the right places, replace all uses of any load with the appropriate
750
        // value, and create the appropriate Upsilon nodes.
751
        m_graph.clearReplacements();
752
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
753
            // This mapping table is intended to be lazy. If something is omitted from the table,
754
            // it means that there haven't been any local stores to that promoted heap location.
755
            m_localMapping.clear();
756
            
757
            // Insert the Phi functions that we had previously created.
758
            for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(block)) {
759
                PromotedHeapLocation location = m_indexToLocation[phiDef->variable()->index()];
760
                
761
                m_insertionSet.insert(
762
                    0, phiDef->value());
763
                m_insertionSet.insert(
764
                    0, location.createHint(m_graph, NodeOrigin(), phiDef->value()));
765
                m_localMapping.add(location, phiDef->value());
766
            }
767
            
768
            if (verbose)
769
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
770
            
771
            // Process the block and replace all uses of loads with the promoted value.
772
            for (Node* node : *block) {
773
                m_graph.performSubstitution(node);
774
                
775
                if (Node* escapee = m_materializationToEscapee.get(node))
776
                    populateMaterialize(block, node, escapee);
777
                
778
                promoteHeapAccess(
779
                    node,
780
                    [&] (PromotedHeapLocation location, Edge value) {
781
                        if (m_sinkCandidates.contains(location.base()))
782
                            m_localMapping.set(location, value.node());
783
                    },
784
                    [&] (PromotedHeapLocation location) {
785
                        if (m_sinkCandidates.contains(location.base())) {
786
                            switch (node->op()) {
787
                            case CheckStructure:
788
                                node->convertToCheckStructureImmediate(resolve(block, location));
789
                                break;
790
791
                            default:
792
                                node->replaceWith(resolve(block, location));
793
                                break;
794
                            }
795
                        }
796
                    });
797
            }
798
            
799
            // Gotta drop some Upsilons.
800
            NodeAndIndex terminal = block->findTerminal();
801
            size_t upsilonInsertionPoint = terminal.index;
802
            NodeOrigin upsilonOrigin = terminal.node->origin;
803
            for (BasicBlock* successorBlock : block->successors()) {
804
                for (SSACalculator::Def* phiDef : m_ssaCalculator.phisForBlock(successorBlock)) {
805
                    Node* phiNode = phiDef->value();
806
                    SSACalculator::Variable* variable = phiDef->variable();
807
                    PromotedHeapLocation location = m_indexToLocation[variable->index()];
808
                    Node* incoming = resolve(block, location);
809
                    
810
                    m_insertionSet.insertNode(
811
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
812
                        OpInfo(phiNode), incoming->defaultEdge());
813
                }
814
            }
815
            
816
            m_insertionSet.execute(block);
817
        }
818
    }
819
    
820
    // Returns the node holding the current value of |location| at this point
    // in |block|. Consults the per-block cache first; on a miss, asks the SSA
    // calculator for the definition reaching the top of the block and caches
    // it.
    Node* resolve(BasicBlock* block, PromotedHeapLocation location)
    {
        Node* cached = m_localMapping.get(location);
        if (cached)
            return cached;

        // No local store to this location has been seen in the current block;
        // fall back to the non-local reaching definition.
        SSACalculator::Variable* variable = m_locationToVariable.get(location);
        SSACalculator::Def* reachingDef = m_ssaCalculator.nonLocalReachingDef(block, variable);
        ASSERT(reachingDef);
        ASSERT(reachingDef->value());

        Node* value = reachingDef->value();
        m_localMapping.add(location, value);
        return value;
    }
833
834
    template<typename SinkCandidateFunctor, typename EscapeFunctor>
835
    void handleNode(
836
        Node* node,
837
        const SinkCandidateFunctor& sinkCandidate,
838
        const EscapeFunctor& escape)
839
    {
840
        switch (node->op()) {
841
        case NewObject:
842
        case MaterializeNewObject:
843
        case MaterializeCreateActivation:
844
            sinkCandidate();
845
            m_graph.doToChildren(
846
                node,
847
                [&] (Edge edge) {
848
                    escape(edge.node());
849
                });
850
            break;
851
852
        case NewFunction:
853
            if (!node->castOperand<FunctionExecutable*>()->singletonFunction()->isStillValid())
854
                sinkCandidate();
855
            escape(node->child1().node());
856
            break;
857
858
        case CreateActivation:
859
            if (!node->castOperand<SymbolTable*>()->singletonScope()->isStillValid())
860
                sinkCandidate();
861
            escape(node->child1().node());
862
            break;
863
864
        case Check:
865
            m_graph.doToChildren(
866
                node,
867
                [&] (Edge edge) {
868
                    if (edge.willNotHaveCheck())
869
                        return;
870
                    
871
                    if (alreadyChecked(edge.useKind(), SpecObject))
872
                        return;
873
                    
874
                    escape(edge.node());
875
                });
876
            break;
877
878
        case MovHint:
879
        case PutHint:
880
            break;
881
882
        case PutStructure:
883
        case CheckStructure:
884
        case GetByOffset:
885
        case MultiGetByOffset:
886
        case GetGetterSetterByOffset: {
887
            Node* target = node->child1().node();
888
            if (!target->isObjectAllocation())
889
                escape(target);
890
            break;
891
        }
892
            
893
        case PutByOffset: {
894
            Node* target = node->child2().node();
895
            if (!target->isObjectAllocation()) {
896
                escape(target);
897
                escape(node->child1().node());
898
            }
899
            escape(node->child3().node());
900
            break;
901
        }
902
903
        case GetClosureVar: {
904
            Node* target = node->child1().node();
905
            if (!target->isActivationAllocation())
906
                escape(target);
907
            break;
908
        }
909
910
        case PutClosureVar: {
911
            Node* target = node->child1().node();
912
            if (!target->isActivationAllocation())
913
                escape(target);
914
            escape(node->child2().node());
915
            break;
916
        }
917
918
        case GetScope: {
919
            Node* target = node->child1().node();
920
            if (!target->isFunctionAllocation())
921
                escape(target);
922
            break;
923
        }
924
925
        case GetExecutable: {
926
            Node* target = node->child1().node();
927
            if (!target->isFunctionAllocation())
928
                escape(target);
929
            break;
930
        }
931
932
        case SkipScope: {
933
            Node* target = node->child1().node();
934
            if (!target->isActivationAllocation())
935
                escape(target);
936
            break;
937
        }
938
            
939
        case MultiPutByOffset:
940
            // FIXME: In the future we should be able to handle this. It's just a matter of
941
            // building the appropriate *Hint variant of this instruction, along with a
942
            // PhantomStructureSelect node - since this transforms the Structure in a conditional
943
            // way.
944
            // https://bugs.webkit.org/show_bug.cgi?id=136924
945
            escape(node->child1().node());
946
            escape(node->child2().node());
947
            break;
948
949
        default:
950
            m_graph.doToChildren(
951
                node,
952
                [&] (Edge edge) {
953
                    escape(edge.node());
954
                });
955
            break;
956
        }
957
    }
958
    
959
    // Creates a fresh materialization node for |escapee|, to be inserted at
    // |where|, and records the mapping between the two. The materialization
    // starts with no children; populateMaterialize() fills them in later once
    // the reaching values are known.
    Node* createMaterialize(Node* escapee, Node* where)
    {
        // The materialization keeps the escapee's semantic origin but exits at
        // the point where it is being inserted.
        NodeOrigin origin(escapee->origin.semantic, where->origin.forExit);
        Node* materialization = nullptr;

        switch (escapee->op()) {
        case NewObject:
        case MaterializeNewObject: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            materialization = m_graph.addNode(
                escapee->prediction(), Node::VarArg, MaterializeNewObject,
                origin, OpInfo(data), OpInfo(), 0, 0);
            break;
        }

        case NewFunction:
            materialization = m_graph.addNode(
                escapee->prediction(), NewFunction,
                origin,
                OpInfo(escapee->cellOperand()),
                escapee->child1());
            break;

        case CreateActivation:
        case MaterializeCreateActivation: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            FrozenValue* symbolTable = escapee->cellOperand();
            materialization = m_graph.addNode(
                escapee->prediction(), Node::VarArg, MaterializeCreateActivation,
                origin, OpInfo(data), OpInfo(symbolTable), 0, 0);
            break;
        }

        default:
            DFG_CRASH(m_graph, escapee, "Bad escapee op");
            break;
        }

        if (verbose)
            dataLog("Creating materialization point at ", where, " for ", escapee, ": ", materialization, "\n");

        m_materializationToEscapee.add(materialization, escapee);
        m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value.append(materialization);

        return materialization;
    }
1014
    
1015
    // Fills in the variable-argument children of the materialization |node|
    // (created earlier by createMaterialize()) using the values reaching
    // |block| for each promoted location of |escapee|.
    void populateMaterialize(BasicBlock* block, Node* node, Node* escapee)
    {
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // The structure is always present and always the first child.
            PromotedHeapLocation structure(StructurePLoc, escapee);
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    Node* value = resolve(block, location);
                    // A field still holding Bottom was never actually written;
                    // we can skip it entirely.
                    if (value->op() == BottomValue)
                        break;

                    data.m_properties.append(PhantomPropertyValue(location.info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            // The scope is always present and always the first child; the
            // symbol table location is only sanity-checked here.
            PromotedHeapLocation scope(ActivationScopePLoc, escapee);
            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, escapee);
            ASSERT(locations.contains(scope));

            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc:
                    ASSERT(location == scope);
                    break;

                case ActivationSymbolTablePLoc:
                    ASSERT(location == symbolTable);
                    break;

                case ClosureVarPLoc: {
                    Node* value = resolve(block, location);
                    // Skip never-written (Bottom) closure variables.
                    if (value->op() == BottomValue)
                        break;

                    data.m_properties.append(PhantomPropertyValue(location.info()));
                    m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case NewFunction: {
            // A NewFunction materialization needs no extra children; we only
            // sanity-check that the escapee's promoted locations are exactly
            // the two we expect.
            if (!ASSERT_DISABLED) {
                Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

                ASSERT(locations.size() == 2);

                PromotedHeapLocation executable(FunctionExecutablePLoc, escapee);
                ASSERT(locations.contains(executable));

                PromotedHeapLocation activation(FunctionActivationPLoc, escapee);
                ASSERT(locations.contains(activation));

                for (PromotedHeapLocation location : locations) {
                    switch (location.kind()) {
                    case FunctionExecutablePLoc:
                        ASSERT(location == executable);
                        break;

                    case FunctionActivationPLoc:
                        ASSERT(location == activation);
                        break;

                    default:
                        DFG_CRASH(m_graph, node, "Bad location kind");
                    }
                }
            }

            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
            break;
        }
    }
1143
    
1144
    // Liveness of the sink candidates at block boundaries; consulted by the
    // computePhis() callback in promoteSunkenFields() to avoid creating Phis
    // for allocations that are dead at a block's head.
    CombinedLiveness m_combinedLiveness;
1145
    // Computes SSA form (Defs and Phis) over the promoted heap locations.
    SSACalculator m_ssaCalculator;
1146
    // The allocations we have decided to sink.
    HashSet<Node*> m_sinkCandidates;
1147
    // Maps each materialization node back to the allocation (escapee) it
    // re-creates; see createMaterialize() / populateMaterialize().
    HashMap<Node*, Node*> m_materializationToEscapee;
1148
    // For each insertion site ("where"), the materializations created there.
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
1149
    // Promoted heap locations grouped by the allocation they belong to; built
    // in promoteSunkenFields().
    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;
1150
    // Mapping between promoted locations and SSA variables, in both
    // directions (location -> variable, and variable index -> location).
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
1151
    Vector<PromotedHeapLocation> m_indexToLocation;
1152
    // Per-block cache of the node currently holding each location's value;
    // cleared at the start of each block and filled lazily by resolve().
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
1153
    InsertionSet m_insertionSet;
1154
};
1155
    
1156
// Entry point for the phase: runs ObjectAllocationSinkingPhase over the given
// graph under a sampling region, returning true if the graph was changed.
bool performObjectAllocationSinking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Object Allocation Sinking Phase");
    bool changed = runPhase<ObjectAllocationSinkingPhase>(graph);
    return changed;
}
1161
1162
} } // namespace JSC::DFG
1163
1164
#endif // ENABLE(DFG_JIT)
1165
- a/Source/JavaScriptCore/dfg/DFGPlan.cpp -3 / +5 lines
Lines 28-33 a/Source/JavaScriptCore/dfg/DFGPlan.cpp_sec1
28
28
29
#if ENABLE(DFG_JIT)
29
#if ENABLE(DFG_JIT)
30
30
31
#include "DFGAllocationCycleSinkingPhase.h"
31
#include "DFGArgumentsEliminationPhase.h"
32
#include "DFGArgumentsEliminationPhase.h"
32
#include "DFGBackwardsPropagationPhase.h"
33
#include "DFGBackwardsPropagationPhase.h"
33
#include "DFGByteCodeParser.h"
34
#include "DFGByteCodeParser.h"
Lines 365-374 Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState) a/Source/JavaScriptCore/dfg/DFGPlan.cpp_sec2
365
        performCleanUp(dfg); // Reduce the graph size a lot.
366
        performCleanUp(dfg); // Reduce the graph size a lot.
366
        changed = false;
367
        changed = false;
367
        changed |= performStrengthReduction(dfg);
368
        changed |= performStrengthReduction(dfg);
368
        if (Options::enableObjectAllocationSinking()) {
369
        changed |= performCriticalEdgeBreaking(dfg);
369
            changed |= performCriticalEdgeBreaking(dfg);
370
        if (Options::enableAllocationCycleSinking())
371
            changed |= performAllocationCycleSinking(dfg);
372
        else
370
            changed |= performObjectAllocationSinking(dfg);
373
            changed |= performObjectAllocationSinking(dfg);
371
        }
372
        if (changed) {
374
        if (changed) {
373
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
375
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
374
            // it might increase live ranges.
376
            // it might increase live ranges.
- a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h -1 / +37 lines
Lines 57-64 public: a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h_sec1
57
        , m_info(info)
57
        , m_info(info)
58
    {
58
    {
59
    }
59
    }
60
    
60
61
    PromotedLocationDescriptor(WTF::HashTableDeletedValueType)
62
        : m_kind(InvalidPromotedLocationKind)
63
        , m_info(1)
64
    {
65
    }
66
61
    bool operator!() const { return m_kind == InvalidPromotedLocationKind; }
67
    bool operator!() const { return m_kind == InvalidPromotedLocationKind; }
68
69
    explicit operator bool() const { return !!*this; }
62
    
70
    
63
    PromotedLocationKind kind() const { return m_kind; }
71
    PromotedLocationKind kind() const { return m_kind; }
64
    unsigned info() const { return m_info; }
72
    unsigned info() const { return m_info; }
Lines 86-91 public: a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h_sec2
86
    {
94
    {
87
        return m_kind == InvalidPromotedLocationKind && m_info;
95
        return m_kind == InvalidPromotedLocationKind && m_info;
88
    }
96
    }
97
98
    bool neededForMaterialization() const
99
    {
100
        switch (kind()) {
101
        case NamedPropertyPLoc:
102
        case ClosureVarPLoc:
103
            return false;
104
105
        default:
106
            return true;
107
        }
108
    }
89
    
109
    
90
    void dump(PrintStream& out) const;
110
    void dump(PrintStream& out) const;
91
111
Lines 94-99 private: a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h_sec3
94
    unsigned m_info;
114
    unsigned m_info;
95
};
115
};
96
116
117
struct PromotedLocationDescriptorHash {
118
    static unsigned hash(const PromotedLocationDescriptor& key) { return key.hash(); }
119
    static bool equal(const PromotedLocationDescriptor& a, const PromotedLocationDescriptor& b) { return a == b; }
120
    static const bool safeToCompareToEmptyOrDeleted = true;
121
};
122
97
class PromotedHeapLocation {
123
class PromotedHeapLocation {
98
public:
124
public:
99
    PromotedHeapLocation(
125
    PromotedHeapLocation(
Lines 176-181 template<> struct HashTraits<JSC::DFG::PromotedHeapLocation> : SimpleClassHashTr a/Source/JavaScriptCore/dfg/DFGPromotedHeapLocation.h_sec4
176
    static const bool emptyValueIsZero = false;
202
    static const bool emptyValueIsZero = false;
177
};
203
};
178
204
205
template<typename T> struct DefaultHash;
206
template<> struct DefaultHash<JSC::DFG::PromotedLocationDescriptor> {
207
    typedef JSC::DFG::PromotedLocationDescriptorHash Hash;
208
};
209
210
template<typename T> struct HashTraits;
211
template<> struct HashTraits<JSC::DFG::PromotedLocationDescriptor> : SimpleClassHashTraits<JSC::DFG::PromotedLocationDescriptor> {
212
    static const bool emptyValueIsZero = false;
213
};
214
179
} // namespace WTF
215
} // namespace WTF
180
216
181
#endif // ENABLE(DFG_JIT)
217
#endif // ENABLE(DFG_JIT)
- a/Source/JavaScriptCore/dfg/DFGValidate.cpp -1 / +31 lines
Lines 524-535 private: a/Source/JavaScriptCore/dfg/DFGValidate.cpp_sec1
524
                case Phantom:
524
                case Phantom:
525
                    VALIDATE((node), !"bad node type for SSA");
525
                    VALIDATE((node), !"bad node type for SSA");
526
                    break;
526
                    break;
527
                    
527
528
                default:
528
                default:
529
                    // FIXME: Add more things here.
529
                    // FIXME: Add more things here.
530
                    // https://bugs.webkit.org/show_bug.cgi?id=123471
530
                    // https://bugs.webkit.org/show_bug.cgi?id=123471
531
                    break;
531
                    break;
532
                }
532
                }
533
                switch (node->op()) {
534
                case PhantomNewObject:
535
                case PhantomNewFunction:
536
                case PhantomCreateActivation:
537
                case PhantomDirectArguments:
538
                case PhantomClonedArguments:
539
                case MovHint:
540
                case Upsilon:
541
                case ForwardVarargs:
542
                case CallForwardVarargs:
543
                case ConstructForwardVarargs:
544
                case GetMyArgumentByVal:
545
                    break;
546
547
                case Check:
548
                    // FIXME: This is probably not correct.
549
                    break;
550
551
                case PutHint:
552
                    VALIDATE((node), node->child1()->isPhantomAllocation());
553
                    break;
554
555
                default:
556
                    m_graph.doToChildren(
557
                        node,
558
                        [&] (const Edge& edge) {
559
                            VALIDATE((node), !edge->isPhantomAllocation());
560
                        });
561
                    break;
562
                }
533
            }
563
            }
534
        }
564
        }
535
    }
565
    }
- a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp -8 / +4 lines
Lines 5286-5297 private: a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp_sec1
5286
        for (unsigned i = 0; i < data.m_properties.size(); ++i)
5286
        for (unsigned i = 0; i < data.m_properties.size(); ++i)
5287
            values.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i)));
5287
            values.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i)));
5288
        
5288
        
5289
        StructureSet set;
5289
        const StructureSet& set = m_node->structureSet();
5290
        m_interpreter.phiChildren()->forAllTransitiveIncomingValues(
5291
            m_graph.varArgChild(m_node, 0).node(),
5292
            [&] (Node* incoming) {
5293
                set.add(incoming->castConstant<Structure*>());
5294
            });
5295
        
5290
        
5296
        Vector<LBasicBlock, 1> blocks(set.size());
5291
        Vector<LBasicBlock, 1> blocks(set.size());
5297
        for (unsigned i = set.size(); i--;)
5292
        for (unsigned i = set.size(); i--;)
Lines 5390-5399 private: a/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp_sec2
5390
5385
5391
        Vector<LValue, 8> values;
5386
        Vector<LValue, 8> values;
5392
        for (unsigned i = 0; i < data.m_properties.size(); ++i)
5387
        for (unsigned i = 0; i < data.m_properties.size(); ++i)
5393
            values.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i)));
5388
            values.append(lowJSValue(m_graph.varArgChild(m_node, 2 + i)));
5394
5389
5395
        LValue scope = lowCell(m_graph.varArgChild(m_node, 0));
5390
        LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
5396
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
5391
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
5392
        ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>());
5397
        Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure();
5393
        Structure* structure = m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure();
5398
5394
5399
        LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("MaterializeCreateActivation slow path"));
5395
        LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("MaterializeCreateActivation slow path"));
- a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp -17 / +46 lines
Lines 220-243 static void compileStub( a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp_sec1
220
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
220
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
221
            }
221
            }
222
        }
222
        }
223
        
223
224
        if (!!exit.m_valueProfile)
224
        if (!!exit.m_valueProfile)
225
            jit.store64(GPRInfo::regT0, exit.m_valueProfile.getSpecFailBucket(0));
225
            jit.store64(GPRInfo::regT0, exit.m_valueProfile.getSpecFailBucket(0));
226
    }
226
    }
227
    
227
228
    // Materialize all objects. Don't materialize an object until all of the objects it needs
228
    // Materialize all objects. Don't materialize an object until all
229
    // have been materialized. Curiously, this is the only place that we have an algorithm that prevents
229
    // of the objects it needs have been materialized. We break cycles
230
    // OSR exit from handling cyclic object materializations. Of course, object allocation sinking
230
    // by populating objects late - we only consider an object as
231
    // currently wouldn't recognize a cycle as being sinkable - but if it did then the only thing that
231
    // needing another object if the latter is needed for the
232
    // would ahve to change is this fixpoint. Instead we would allocate the objects first and populate
232
    // allocation of the former.
233
    // them with data later.
233
234
    HashSet<ExitTimeObjectMaterialization*> toMaterialize;
234
    HashSet<ExitTimeObjectMaterialization*> toMaterialize;
235
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
235
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
236
        toMaterialize.add(materialization);
236
        toMaterialize.add(materialization);
237
    
237
238
    while (!toMaterialize.isEmpty()) {
238
    while (!toMaterialize.isEmpty()) {
239
        unsigned previousToMaterializeSize = toMaterialize.size();
239
        unsigned previousToMaterializeSize = toMaterialize.size();
240
        
240
241
        Vector<ExitTimeObjectMaterialization*> worklist;
241
        Vector<ExitTimeObjectMaterialization*> worklist;
242
        worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
242
        worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
243
        for (ExitTimeObjectMaterialization* materialization : worklist) {
243
        for (ExitTimeObjectMaterialization* materialization : worklist) {
Lines 246-265 static void compileStub( a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp_sec2
246
            for (ExitPropertyValue value : materialization->properties()) {
246
            for (ExitPropertyValue value : materialization->properties()) {
247
                if (!value.value().isObjectMaterialization())
247
                if (!value.value().isObjectMaterialization())
248
                    continue;
248
                    continue;
249
                if (!value.location().neededForMaterialization())
250
                    continue;
249
                if (toMaterialize.contains(value.value().objectMaterialization())) {
251
                if (toMaterialize.contains(value.value().objectMaterialization())) {
250
                    // Gotta skip this one, since one of its fields points to a materialization
252
                    // Gotta skip this one, since it needs a
251
                    // that hasn't been materialized.
253
                    // materialization that hasn't been materialized.
252
                    allGood = false;
254
                    allGood = false;
253
                    break;
255
                    break;
254
                }
256
                }
255
            }
257
            }
256
            if (!allGood)
258
            if (!allGood)
257
                continue;
259
                continue;
258
            
260
259
            // All systems go for materializing the object. First we recover the values of all of
261
            // All systems go for materializing the object. First we
260
            // its fields and then we call a function to actually allocate the beast.
262
            // recover the values of all of its fields and then we
263
            // call a function to actually allocate the beast.
264
            // We only recover the fields that are needed for the allocation.
261
            for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
265
            for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
262
                const ExitValue& value = materialization->properties()[propertyIndex].value();
266
                const ExitPropertyValue& property = materialization->properties()[propertyIndex];
267
                const ExitValue& value = property.value();
268
                if (!property.location().neededForMaterialization())
269
                    continue;
270
263
                compileRecovery(
271
                compileRecovery(
264
                    jit, value, record, jitCode->stackmaps, registerScratch,
272
                    jit, value, record, jitCode->stackmaps, registerScratch,
265
                    materializationToPointer);
273
                    materializationToPointer);
Lines 273-279 static void compileStub( a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp_sec3
273
            jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
281
            jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
274
            jit.call(GPRInfo::nonArgGPR0);
282
            jit.call(GPRInfo::nonArgGPR0);
275
            jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));
283
            jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));
276
            
284
277
            // Let everyone know that we're done.
285
            // Let everyone know that we're done.
278
            toMaterialize.remove(materialization);
286
            toMaterialize.remove(materialization);
279
        }
287
        }
Lines 284-289 static void compileStub( a/Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp_sec4
284
        RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
292
        RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
285
    }
293
    }
286
294
295
    // Now that all the objects have been allocated, we populate them
296
    // with the correct values. This time we can recover all the
297
    // fields, including those that are only needed for the allocation.
298
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
299
        for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
300
            const ExitValue& value = materialization->properties()[propertyIndex].value();
301
            compileRecovery(
302
                jit, value, record, jitCode->stackmaps, registerScratch,
303
                materializationToPointer);
304
            jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
305
        }
306
307
        // This call assumes that we don't pass arguments on the stack
308
        jit.setupArgumentsWithExecState(
309
            CCallHelpers::TrustedImmPtr(materialization),
310
            CCallHelpers::TrustedImmPtr(materializationToPointer.get(materialization)),
311
            CCallHelpers::TrustedImmPtr(materializationArguments));
312
        jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationPopulateObjectInOSR)), GPRInfo::nonArgGPR0);
313
        jit.call(GPRInfo::nonArgGPR0);
314
    }
315
287
    // Save all state from wherever the exit data tells us it was, into the appropriate place in
316
    // Save all state from wherever the exit data tells us it was, into the appropriate place in
288
    // the scratch buffer. This also does the reboxing.
317
    // the scratch buffer. This also does the reboxing.
289
    
318
    
- a/Source/JavaScriptCore/ftl/FTLOperations.cpp -26 / +91 lines
Lines 48-96 extern "C" JSCell* JIT_OPERATION operationNewObjectWithButterfly(ExecState* exec a/Source/JavaScriptCore/ftl/FTLOperations.cpp_sec1
48
    return JSFinalObject::create(exec, structure, butterfly);
48
    return JSFinalObject::create(exec, structure, butterfly);
49
}
49
}
50
50
51
extern "C" void JIT_OPERATION operationPopulateObjectInOSR(
52
    ExecState* exec, ExitTimeObjectMaterialization* materialization,
53
    EncodedJSValue* encodedValue, EncodedJSValue* values)
54
{
55
    VM& vm = exec->vm();
56
    CodeBlock* codeBlock = exec->codeBlock();
57
58
    // We cannot GC. We've got pointers in evil places.
59
    // FIXME: We are not doing anything that can GC here, and this is
60
    // probably unnecessary.
61
    DeferGCForAWhile deferGC(vm.heap);
62
63
    switch (materialization->type()) {
64
    case PhantomNewObject: {
65
        JSFinalObject* object = jsCast<JSFinalObject*>(JSValue::decode(*encodedValue));
66
        Structure* structure = object->structure();
67
68
        // Figure out what the heck to populate the object with. Use
69
        // getPropertiesConcurrently() because that happens to be
70
        // lower-level and more convenient. It doesn't change the
71
        // materialization of the property table. We want to have
72
        // minimal visible effects on the system. Also, don't mind
73
        // that this is O(n^2). It doesn't matter. We only get here
74
        // from OSR exit.
75
        for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
76
            for (unsigned i = materialization->properties().size(); i--;) {
77
                const ExitPropertyValue& property = materialization->properties()[i];
78
                if (property.location().kind() != NamedPropertyPLoc)
79
                    continue;
80
                if (codeBlock->identifier(property.location().info()).impl() != entry.key)
81
                    continue;
82
83
                object->putDirect(vm, entry.offset, JSValue::decode(values[i]));
84
            }
85
        }
86
        break;
87
    }
88
89
    case PhantomNewFunction:
90
    case PhantomDirectArguments:
91
    case PhantomClonedArguments:
92
        // Those are completely handled by operationMaterializeObjectInOSR
93
        break;
94
95
    case PhantomCreateActivation: {
96
        JSLexicalEnvironment* activation = jsCast<JSLexicalEnvironment*>(JSValue::decode(*encodedValue));
97
98
        // Figure out what to populate the activation with
99
        for (unsigned i = materialization->properties().size(); i--;) {
100
            const ExitPropertyValue& property = materialization->properties()[i];
101
            if (property.location().kind() != ClosureVarPLoc)
102
                continue;
103
104
            activation->variableAt(ScopeOffset(property.location().info())).set(exec->vm(), activation, JSValue::decode(values[i]));
105
        }
106
107
        break;
108
    }
109
110
111
    default:
112
        RELEASE_ASSERT_NOT_REACHED();
113
        break;
114
115
    }
116
}
117
51
extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR(
118
extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR(
52
    ExecState* exec, ExitTimeObjectMaterialization* materialization, EncodedJSValue* values)
119
    ExecState* exec, ExitTimeObjectMaterialization* materialization, EncodedJSValue* values)
53
{
120
{
54
    VM& vm = exec->vm();
121
    VM& vm = exec->vm();
55
    CodeBlock* codeBlock = exec->codeBlock();
56
122
57
    // We cannot GC. We've got pointers in evil places.
123
    // We cannot GC. We've got pointers in evil places.
58
    DeferGCForAWhile deferGC(vm.heap);
124
    DeferGCForAWhile deferGC(vm.heap);
59
    
125
    
60
    switch (materialization->type()) {
126
    switch (materialization->type()) {
61
    case PhantomNewObject: {
127
    case PhantomNewObject: {
62
        // First figure out what the structure is.
128
        // Figure out what the structure is
63
        Structure* structure = nullptr;
129
        Structure* structure = nullptr;
64
        for (unsigned i = materialization->properties().size(); i--;) {
130
        for (unsigned i = materialization->properties().size(); i--;) {
65
            const ExitPropertyValue& property = materialization->properties()[i];
131
            const ExitPropertyValue& property = materialization->properties()[i];
66
            if (property.location() != PromotedLocationDescriptor(StructurePLoc))
132
            if (property.location() != PromotedLocationDescriptor(StructurePLoc))
67
                continue;
133
                continue;
68
        
134
69
            structure = jsCast<Structure*>(JSValue::decode(values[i]));
135
            structure = jsCast<Structure*>(JSValue::decode(values[i]));
70
            break;
136
            break;
71
        }
137
        }
72
        RELEASE_ASSERT(structure);
138
        RELEASE_ASSERT(structure);
73
    
139
74
        // Let's create that object!
75
        JSFinalObject* result = JSFinalObject::create(vm, structure);
140
        JSFinalObject* result = JSFinalObject::create(vm, structure);
76
    
141
77
        // Now figure out what the heck to populate the object with. Use getPropertiesConcurrently()
142
        // The real values will be put subsequently by
78
        // because that happens to be lower-level and more convenient. It doesn't change the
143
        // operationPopulateObjectInOSR. We can't fill them in
79
        // materialization of the property table. We want to have minimal visible effects on the
144
        // now, because they may not be available yet (typically
80
        // system. Also, don't mind that this is O(n^2). It doesn't matter. We only get here from OSR
145
        // because we have a cyclic dependency graph).
81
        // exit.
146
82
        for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
147
        // We put a dummy value here in order to avoid super-subtle
83
            for (unsigned i = materialization->properties().size(); i--;) {
148
        // GC-and-OSR-exit crashes in case we have a bug and some
84
                const ExitPropertyValue& property = materialization->properties()[i];
149
        // field is, for any reason, not filled later.
85
                if (property.location().kind() != NamedPropertyPLoc)
150
        // We use a random-ish number instead of a sensible value like
86
                    continue;
151
        // undefined to make possible bugs easier to track.
87
                if (codeBlock->identifier(property.location().info()).impl() != entry.key)
152
        for (PropertyMapEntry entry : structure->getPropertiesConcurrently())
88
                    continue;
153
            result->putDirect(vm, entry.offset, jsNumber(19723));
89
            
154
90
                result->putDirect(vm, entry.offset, JSValue::decode(values[i]));
91
            }
92
        }
93
    
94
        return result;
155
        return result;
95
    }
156
    }
96
157
Lines 113-119 extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR( a/Source/JavaScriptCore/ftl/FTLOperations.cpp_sec2
113
    }
174
    }
114
175
115
    case PhantomCreateActivation: {
176
    case PhantomCreateActivation: {
116
        // Figure out where the scope is
177
        // Figure out what the scope and symbol table are
117
        JSScope* scope = nullptr;
178
        JSScope* scope = nullptr;
118
        SymbolTable* table = nullptr;
179
        SymbolTable* table = nullptr;
119
        for (unsigned i = materialization->properties().size(); i--;) {
180
        for (unsigned i = materialization->properties().size(); i--;) {
Lines 133-145 extern "C" JSCell* JIT_OPERATION operationMaterializeObjectInOSR( a/Source/JavaScriptCore/ftl/FTLOperations.cpp_sec3
133
        JSLexicalEnvironment* result = JSLexicalEnvironment::create(vm, structure, scope, table);
194
        JSLexicalEnvironment* result = JSLexicalEnvironment::create(vm, structure, scope, table);
134
195
135
        RELEASE_ASSERT(materialization->properties().size() - 2 == table->scopeSize());
196
        RELEASE_ASSERT(materialization->properties().size() - 2 == table->scopeSize());
136
        // Figure out what to populate the activation with
197
198
        // The real values will be put subsequently by
199
        // operationPopulateNewObjectInOSR. See the PhantomNewObject
200
        // case for details.
137
        for (unsigned i = materialization->properties().size(); i--;) {
201
        for (unsigned i = materialization->properties().size(); i--;) {
138
            const ExitPropertyValue& property = materialization->properties()[i];
202
            const ExitPropertyValue& property = materialization->properties()[i];
139
            if (property.location().kind() != ClosureVarPLoc)
203
            if (property.location().kind() != ClosureVarPLoc)
140
                continue;
204
                continue;
141
205
142
            result->variableAt(ScopeOffset(property.location().info())).set(exec->vm(), result, JSValue::decode(values[i]));
206
            result->variableAt(ScopeOffset(property.location().info())).set(
207
                exec->vm(), result, jsNumber(29834));
143
        }
208
        }
144
209
145
        if (validationEnabled()) {
210
        if (validationEnabled()) {
- a/Source/JavaScriptCore/ftl/FTLOperations.h +3 lines
Lines 40-45 JSCell* JIT_OPERATION operationNewObjectWithButterfly(ExecState*, Structure*) WT a/Source/JavaScriptCore/ftl/FTLOperations.h_sec1
40
JSCell* JIT_OPERATION operationMaterializeObjectInOSR(
40
JSCell* JIT_OPERATION operationMaterializeObjectInOSR(
41
    ExecState*, ExitTimeObjectMaterialization*, EncodedJSValue*) WTF_INTERNAL;
41
    ExecState*, ExitTimeObjectMaterialization*, EncodedJSValue*) WTF_INTERNAL;
42
42
43
void JIT_OPERATION operationPopulateObjectInOSR(
44
    ExecState*, ExitTimeObjectMaterialization*, EncodedJSValue*, EncodedJSValue*) WTF_INTERNAL;
45
43
} // extern "C"
46
} // extern "C"
44
47
45
} } // namespace JSC::DFG
48
} } // namespace JSC::DFG
- a/Source/JavaScriptCore/runtime/Options.h -1 / +1 lines
Lines 185-191 typedef const char* optionString; a/Source/JavaScriptCore/runtime/Options.h_sec1
185
    v(double, minimumCallToKnownRate, 0.51, nullptr) \
185
    v(double, minimumCallToKnownRate, 0.51, nullptr) \
186
    v(bool, optimizeNativeCalls, false, nullptr) \
186
    v(bool, optimizeNativeCalls, false, nullptr) \
187
    v(bool, enableMovHintRemoval, true, nullptr) \
187
    v(bool, enableMovHintRemoval, true, nullptr) \
188
    v(bool, enableObjectAllocationSinking, true, nullptr) \
188
    v(bool, enableAllocationCycleSinking, false, nullptr) \
189
    \
189
    \
190
    v(bool, enableConcurrentJIT, true, "allows the DFG / FTL compilation in threads other than the executing JS thread") \
190
    v(bool, enableConcurrentJIT, true, "allows the DFG / FTL compilation in threads other than the executing JS thread") \
191
    v(unsigned, numberOfDFGCompilerThreads, computeNumberOfWorkerThreads(2, 2) - 1, nullptr) \
191
    v(unsigned, numberOfDFGCompilerThreads, computeNumberOfWorkerThreads(2, 2) - 1, nullptr) \
- a/Tools/ChangeLog +12 lines
Lines 1-3 a/Tools/ChangeLog_sec1
1
2015-06-30  Basile Clement  <basile_clement@apple.com>
2
3
        Object cycles should not prevent allocation elimination/sinking
4
        https://bugs.webkit.org/show_bug.cgi?id=143073
5
6
        Reviewed by NOBODY (OOPS!).
7
8
        Cyclic allocation sinking is disabled by default, but we want it
9
        enabled when running tests.
10
11
        * Scripts/run-javascriptcore-tests:
12
1
2015-06-30  Jonathan Davis  <jond@apple.com>
13
2015-06-30  Jonathan Davis  <jond@apple.com>
2
14
3
        Unreviewed. Added myself as a committer.
15
        Unreviewed. Added myself as a committer.
- a/Tools/Scripts/run-javascriptcore-tests +1 lines
Lines 144-149 if (!defined($root) && $buildJSC) { a/Tools/Scripts/run-javascriptcore-tests_sec1
144
my $productDir = jscProductDir();
144
my $productDir = jscProductDir();
145
$ENV{DYLD_FRAMEWORK_PATH} = $productDir;
145
$ENV{DYLD_FRAMEWORK_PATH} = $productDir;
146
$ENV{JSC_timeout} = 60 unless $ENV{JSC_timeout}; # Set a 60 second timeout on all jsc tests (if environment variable not defined already).
146
$ENV{JSC_timeout} = 60 unless $ENV{JSC_timeout}; # Set a 60 second timeout on all jsc tests (if environment variable not defined already).
147
$ENV{JSC_enableAllocationCycleSinking} = 1 unless defined($ENV{JSC_enableAllocationCycleSinking});
147
$ENV{TZ}="US/Pacific"; # Some tests fail if the time zone is not set to US/Pacific (<https://webkit.org/b/136363>)
148
$ENV{TZ}="US/Pacific"; # Some tests fail if the time zone is not set to US/Pacific (<https://webkit.org/b/136363>)
148
setPathForRunningWebKitApp(\%ENV) if isCygwin();
149
setPathForRunningWebKitApp(\%ENV) if isCygwin();
149
150
- a/LayoutTests/ChangeLog +21 lines
Lines 1-3 a/LayoutTests/ChangeLog_sec1
1
2015-06-30  Basile Clement  <basile_clement@apple.com>
2
3
        Object cycles should not prevent allocation elimination/sinking
4
        https://bugs.webkit.org/show_bug.cgi?id=143073
5
6
        Reviewed by NOBODY (OOPS!).
7
8
        Add a few microbenchmarks that show performance improvement when
9
        sinking or eliminating object cycles.
10
11
        * js/regress/script-tests/elidable-new-object-cycle.js: Added.
12
        (sumOfArithSeries):
13
        (foo):
14
        * js/regress/script-tests/sinkable-closure-cycle.js: Added.
15
        (factorial.f):
16
        (factorial):
17
        * js/regress/script-tests/sinkable-new-object-cycle.js: Added.
18
        (sumOfArithSeries):
19
        (verify):
20
        (foo):
21
1
2015-06-30  Chris Dumez  <cdumez@apple.com>
22
2015-06-30  Chris Dumez  <cdumez@apple.com>
2
23
3
        Unreviewed, revert bad WK1 rebaseline done in r186106.
24
        Unreviewed, revert bad WK1 rebaseline done in r186106.
- a/LayoutTests/js/regress/script-tests/elidable-new-object-cycle.js +24 lines
Line 0 a/LayoutTests/js/regress/script-tests/elidable-new-object-cycle.js_sec1
1
function sumOfArithSeries(limit) {
2
    return limit * (limit + 1) / 2;
3
}
4
5
var n = 10000000;
6
7
function foo() {
8
    var result = 0;
9
    for (var i = 0; i < n; ++i) {
10
        var q = {};
11
        var leaf = {f:i, tree: q};
12
        var o = {f:leaf, tree: q};
13
        var p = {f:leaf, tree: q};
14
        q.f = o;
15
        q.g = p;
16
        q.tree = q;
17
        result += q.f.f.f;
18
    }
19
    return result;
20
}
21
22
var result = foo();
23
if (result != sumOfArithSeries(n - 1))
24
    throw "Error: bad result: " + result;
- a/LayoutTests/js/regress/script-tests/sinkable-closure-cycle.js +18 lines
Line 0 a/LayoutTests/js/regress/script-tests/sinkable-closure-cycle.js_sec1
1
function factorial(x) {
2
    if (x < 0) throw "We are not Gamma";
3
4
    if (x === 0)
5
        return 1;
6
7
    function f(i, acc) {
8
        if (i === 1)
9
            return acc;
10
        return f(i - 1, i * acc);
11
    }
12
13
    return f(x, 1);
14
}
15
noInline(factorial);
16
17
for (var i = 0; i < 1000000; ++i)
18
    factorial(i % 3);
- a/LayoutTests/js/regress/script-tests/sinkable-new-object-cycle.js +38 lines
Line 0 a/LayoutTests/js/regress/script-tests/sinkable-new-object-cycle.js_sec1
1
function sumOfArithSeries(limit) {
2
    return limit * (limit + 1) / 2;
3
}
4
5
var n = 10000000;
6
7
function verify(q, i) {
8
    if (q.f == q.g)
9
        throw "Error: q.f == q.g";
10
    if (q.f.f != q.g.f)
11
        throw "Error: q.f.f != q.g.f";
12
    if (q.f.f.f != i)
13
        throw "Error: q.f.f.f != i";
14
}
15
16
function foo() {
17
    var result = 0;
18
    for (var i = 0; i < n; ++i) {
19
        var q = {};
20
        var leaf = {f:i, tree:q};
21
        var o = {f:leaf, tree:q};
22
        var p = {f:leaf, tree:q};
23
        q.f = o;
24
        q.g = p;
25
        q.tree = q;
26
        result += q.f.f.f;
27
        if (!(i % 100))
28
            verify(q, i);
29
    }
30
    return result;
31
}
32
33
noInline(foo);
34
noInline(verify);
35
36
var result = foo();
37
if (result != sumOfArithSeries(n - 1))
38
    throw "Error: bad result: " + result;

Return to Bug 143073