8341293: Split field loads through Nested Phis #21270

Draft: wants to merge 2 commits into master.

Changes from all commits
101 changes: 70 additions & 31 deletions src/hotspot/share/opto/escape.cpp
@@ -463,8 +463,15 @@ bool ConnectionGraph::compute_escape() {
// if at least one scalar replaceable allocation participates in the merge.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
bool found_sr_allocate = false;

int nof_input_phi_nodes = 0;
for (uint i = 1; i < ophi->req(); i++) {
if (ophi->in(i)->is_Phi()) {
// Give up on a merge with more than one input Phi node.
if (++nof_input_phi_nodes > 1) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce Phi %d on invocation %d. More than one input Phi node.", ophi->_idx, _invocation);)
return false;
}
}
JavaObjectNode* ptn = unique_java_object(ophi->in(i));
if (ptn != nullptr && ptn->scalar_replaceable()) {
AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
@@ -484,7 +491,7 @@ bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
}
}

NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Cannot reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
return found_sr_allocate;
}
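
The guard above is what makes nested merges tractable: a candidate merge may feed at most one other Phi into its data inputs. As a rough illustration of the admitted shape, outside of HotSpot and with toy node types (only the req()/in()/is_Phi() idea mirrors the C2 API; everything else here is made up for the sketch):

    #include <cstring>
    #include <vector>

    // Toy stand-ins for C2 IR nodes; illustrative only, not HotSpot code.
    struct ToyNode {
      const char* kind;                              // "Phi", "Allocate", ...
      std::vector<ToyNode*> inputs;                  // inputs[0] is control
      bool is_Phi() const { return std::strcmp(kind, "Phi") == 0; }
    };

    // Mirrors the new check: reject a merge once a second input Phi shows up.
    bool at_most_one_input_phi(const ToyNode* ophi) {
      int nof_input_phi_nodes = 0;
      for (size_t i = 1; i < ophi->inputs.size(); i++) {   // skip the control input
        if (ophi->inputs[i]->is_Phi() && ++nof_input_phi_nodes > 1) {
          return false;                                    // more than one nested Phi
        }
      }
      return true;
    }

Under this rule a shape like Phi(alloc1, Phi(alloc2, alloc3)) remains a candidate, while Phi(Phi(...), Phi(...)) is rejected up front.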

@@ -527,41 +534,50 @@ bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
// - Phi -> AddP -> Load
// - Phi -> CastPP -> SafePoints
// - Phi -> CastPP -> AddP -> Load
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
// - Phi -> Phi -> AddP -> Load
bool ConnectionGraph::can_reduce_check_users(Node* n, uint phi_nest_level) const {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* use = n->fast_out(i);

if (use->is_SafePoint()) {
// Skip Phi -> Phi -> SafePoints and allow only Phi -> SafePoints and Phi -> CastPP -> SafePoints
if (use->is_SafePoint() && (!n->is_Phi() || phi_nest_level < 1)) {
if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
return false;
} else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
return false;
}
} else if (use->is_AddP()) {
assert(phi_nest_level <= 1, "unexpected nesting level");
Node* addp = use;
for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
Node* use_use = addp->fast_out(j);
const Type* load_type = _igvn->type(use_use);

if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn, (phi_nest_level > 0))) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce %sPhi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", (phi_nest_level > 0) ? "nested " : "", n->_idx, _invocation, use_use->Name());)
return false;
} else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce %sPhi %d on invocation %d. [Narrow] Klass Load: %s", (phi_nest_level > 0) ? "nested " : "", n->_idx, _invocation, use_use->Name());)
return false;
}
}
} else if (nesting > 0) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
} else if (phi_nest_level > 0) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), phi_nest_level);)
return false;
} else if (use->is_Phi()) {
if (n->_idx == use->_idx) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce Phi %d on invocation %d. Self-loop nested Phi.", n->_idx, _invocation);)
return false;
} else if (!can_reduce_phi_check_inputs(use->as_Phi()) || !can_reduce_check_users(use->as_Phi(), phi_nest_level+1)) {
return false;
}
} else if (use->is_CastPP()) {
const Type* cast_t = _igvn->type(use);
if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
if (TraceReduceAllocationMerges) {
tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
tty->print_cr("Cannot reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
use->dump();
}
#endif
if (!can_reduce) {
#ifndef PRODUCT
if (TraceReduceAllocationMerges) {
tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
tty->print_cr("Cannot reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
n->dump(5);
}
#endif
}
}

if (!can_reduce_check_users(use, nesting+1)) {
if (!can_reduce_check_users(use, phi_nest_level+1)) {
return false;
}
} else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
if (!can_reduce_cmp(n, use)) {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
return false;
}
} else {
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Cannot reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
return false;
}
}
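
Taken together, the branches above implement a depth-limited walk over the merge's users: SafePoints are tolerated only on the outer Phi, one level of Phi nesting is followed recursively, and a self-loop ends the attempt. A condensed sketch of that control flow with toy types (the CastPP, Cmp, and per-Load checks are elided; names are illustrative, not the HotSpot API):

    #include <cstring>
    #include <vector>

    // Toy def->use view of an IR node; illustrative only.
    struct ToyNode {
      const char* kind;                        // "Phi", "AddP", "SafePoint", ...
      std::vector<ToyNode*> uses;              // def->use edges, like fast_outs()
      bool is(const char* k) const { return std::strcmp(kind, k) == 0; }
    };

    bool users_are_reducible(const ToyNode* n, unsigned phi_nest_level) {
      for (const ToyNode* use : n->uses) {
        if (use->is("AddP")) {
          continue;                            // stands in for the Load checks above
        } else if (use->is("SafePoint")) {
          if (n->is("Phi") && phi_nest_level > 0) {
            return false;                      // no Phi -> Phi -> SafePoint shapes
          }
        } else if (use->is("Phi")) {
          if (use == n || phi_nest_level > 0) {
            return false;                      // self-loop, or nesting deeper than one
          }
          if (!users_are_reducible(use, phi_nest_level + 1)) {
            return false;
          }
        } else {
          return false;                        // any other user kills the reduction
        }
      }
      return true;
    }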
@@ -631,7 +647,7 @@ bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
return false;
}

if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* phi_nest_level: */ 0)) {
return false;
}

@@ -738,7 +754,7 @@ Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current
return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::UnconditionalDependency, nullptr));
}

Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist) {
Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist, Unique_Node_List &reducible_merges) {
const Type* load_type = _igvn->type(curr_load);
Node* nsr_value = _igvn->zerocon(load_type->basic_type());
Node* memory = curr_load->in(MemNode::Memory);
@@ -788,7 +804,7 @@ Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr

// Takes care of updating CG and split_unique_types worklists due
// to cloned AddP->Load.
updates_after_load_split(data_phi, curr_load, alloc_worklist);
updates_after_load_split(data_phi, curr_load, alloc_worklist, reducible_merges);

return _igvn->transform(data_phi);
}
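
For orientation, the transformation these helpers implement rewrites a load from a merged base into per-input loads merged by a new data Phi (the "Field" Phi in the diagram below). A rough source-level analogy, leaving out the null guards that split_castpp_load_through_phi inserts around nullable bases:

    struct Obj { int f; };

    // Before: one load through the merge, i.e. Load(Phi(a, b), f).
    int load_through_merge(bool c, Obj* a, Obj* b) {
      Obj* merged = c ? a : b;    // the object Phi
      return merged->f;           // the load through the Phi
    }

    // After: one load per merge input, merged by a data ("field") Phi.
    // In real IR each load sits on its own control path rather than
    // executing unconditionally as it does here.
    int split_load(bool c, Obj* a, Obj* b) {
      int va = a->f;              // Load(a, f)
      int vb = b->f;              // Load(b, f)
      return c ? va : vb;         // the new data Phi
    }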
@@ -865,7 +881,7 @@ Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr
// \|/
// Phi # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist, Unique_Node_List &reducible_merges) {
Node* ophi = curr_castpp->in(1);
assert(ophi->is_Phi(), "Expected this to be a Phi node.");

@@ -913,7 +929,7 @@ void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, Growabl
// 'split_castpp_load_through_phi` method will add an
// 'If-Then-Else-Region` around nullable bases and only load from them
// when the input is not null.
Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist, reducible_merges);
_igvn->replace_node(use_use, phi);

--j;
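
As the comment above notes, a load from a base that may be null is wrapped in an If-Then-Else-Region so the split load only executes on the non-null path. In source-level miniature (a hypothetical shape, not the actual IR-builder calls):

    struct Obj { int f; };

    // Null-guarded per-input load: the value defaults to zero (the zerocon
    // of the load's type, 'nsr_value' above) when the nullable base is null.
    int guarded_load(Obj* maybe_null_base) {
      int v = 0;                       // nsr_value: zerocon of the load type
      if (maybe_null_base != nullptr) {
        v = maybe_null_base->f;        // load only on the non-null path
      }
      return v;                        // region + phi merging both paths
    }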
@@ -1014,7 +1030,7 @@ void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
// the connection graph. Note that the changes in the CG below
// won't affect the ES of objects since the new nodes have the
// same status as the old ones.
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist) {
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist, Unique_Node_List &reducible_merges) {
assert(data_phi != nullptr, "Output of split_through_phi is null.");
assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");
@@ -1043,8 +1059,14 @@ void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_lo
// The base might not be something that we can create a unique
// type for. If that's the case we are done with that input.
PointsToNode* jobj_ptn = unique_java_object(base);
if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
continue;
if (base->is_Phi()) {
if (!reducible_merges.member(base)) {
continue;
}
} else {
if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
continue;
}
}

// Push to alloc_worklist since the base has a unique_type
}
}

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist) {
void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist, Unique_Node_List &reducible_merges) {
// We'll pass this to 'split_through_phi' so that it'll do the split even
// though the load doesn't have a unique instance type.
bool ignore_missing_instance_id = true;
Expand All @@ -1083,7 +1105,7 @@ void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableAr

// Takes care of updating CG and split_unique_types worklists due to cloned
// AddP->Load.
updates_after_load_split(data_phi, previous_load, alloc_worklist);
updates_after_load_split(data_phi, previous_load, alloc_worklist, reducible_merges);

_igvn->replace_node(previous_load, data_phi);
}
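
The updates_after_load_split hunk above is what lets nested merges survive the connection-graph bookkeeping: after a load is split, its new base may itself be a Phi, and such a base is now accepted as long as it is a merge already proven reducible. A small sketch of just that decision, with toy types standing in for the CG structures:

    #include <unordered_set>

    struct ToyNode { bool is_phi; };              // toy stand-in for Node
    struct ToyPtn  { bool scalar_replaceable; };  // toy stand-in for JavaObjectNode

    // A Phi base is acceptable iff it is a known reducible merge; any other
    // base still needs a unique, scalar-replaceable JavaObject behind it.
    bool base_is_acceptable(const ToyNode* base,
                            const ToyPtn* jobj_ptn,  // may be null
                            const std::unordered_set<const ToyNode*>& reducible_merges) {
      if (base->is_phi) {
        return reducible_merges.count(base) != 0;
      }
      return jobj_ptn != nullptr && jobj_ptn->scalar_replaceable;
    }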
@@ -1274,7 +1296,24 @@ bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, No
return true;
}

void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist, Unique_Node_List &reducible_merges) {
Unique_Node_List nested_phis;
// Collect the nested Phi nodes first, because the graph changes while the child (nested) Phis are split.
for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
Node* use = ophi->fast_out(i);
if (use->is_Phi()) {
assert(use->_idx != ophi->_idx, "Unexpected self-loop Phi.");
nested_phis.push(use);
}
}

// In nested scenarios it is crucial to split through the child Phi nodes before their parents:
// the field loads hanging off a child Phi must be split before the parent Phi itself is reduced.
for (uint i = 0; i < nested_phis.size(); i++) {
Node* nested_phi = nested_phis.at(i);
reduce_phi(nested_phi->as_Phi(), alloc_worklist, memnode_worklist, reducible_merges);
}

bool delay = _igvn->delay_transform();
_igvn->set_delay_transform(true);
_igvn->hash_delete(ophi);
@@ -1302,14 +1341,14 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_wo
// splitting CastPPs we make reference to the inputs of the Cmp that is used
// by the If controlling the CastPP.
for (uint i = 0; i < castpps.size(); i++) {
reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist, reducible_merges);
}

for (uint i = 0; i < others.size(); i++) {
Node* use = others.at(i);

if (use->is_AddP()) {
reduce_phi_on_field_access(use, alloc_worklist);
reduce_phi_on_field_access(use, alloc_worklist, reducible_merges);
} else if (use->is_Cmp()) {
reduce_phi_on_cmp(use);
}
@@ -4461,7 +4500,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// finishes. For now we just try to split out the SR inputs of the merge.
Node* parent = n->in(1);
if (reducible_merges.member(n)) {
reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist, reducible_merges);
#ifdef ASSERT
if (VerifyReduceAllocationMerges) {
reduced_merges.push(n);
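The new preamble of reduce_phi is what ties the nesting together: nested Phis are snapshotted up front (reducing them mutates the out-edges being iterated) and then reduced child-first, so the field loads hanging off a child merge are already split when the parent is rewritten. The recursion in miniature, with toy types:

    #include <vector>

    // Toy merge node; 'nested' holds the users that are themselves Phis.
    struct ToyPhi {
      std::vector<ToyPhi*> nested;
    };

    void reduce(ToyPhi* ophi) {
      // Snapshot first: reducing a child rewires edges and would otherwise
      // invalidate iteration over ophi's live out-edges.
      std::vector<ToyPhi*> children = ophi->nested;
      for (ToyPhi* child : children) {
        reduce(child);             // bottom-up: child merges before the parent
      }
      // ...then split ophi itself (CastPP loads, AddP loads, Cmp, SafePoints).
    }

Termination is guaranteed upstream: self-loops are rejected and only one level of nesting is admitted by can_reduce_check_users.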
12 changes: 6 additions & 6 deletions src/hotspot/share/opto/escape.hpp
@@ -592,8 +592,8 @@ class ConnectionGraph: public ArenaObj {
// Methods related to Reduce Allocation Merges
bool has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges);
PhiNode* create_selector(PhiNode* ophi) const;
void updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist);
Node* split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist);
void updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist, Unique_Node_List &reducible_merges);
Node* split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist, Unique_Node_List &reducible_merges);
void reset_scalar_replaceable_entries(PhiNode* ophi);
bool has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges);
Node* specialize_cmp(Node* base, Node* curr_ctrl);
@@ -602,15 +602,15 @@
bool can_reduce_cmp(Node* n, Node* cmp) const;
bool has_been_reduced(PhiNode* n, SafePointNode* sfpt) const;
bool can_reduce_phi(PhiNode* ophi) const;
bool can_reduce_check_users(Node* n, uint nesting) const;
bool can_reduce_check_users(Node* n, uint phi_nest_level) const;
bool can_reduce_phi_check_inputs(PhiNode* ophi) const;

void reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist);
void reduce_phi_on_castpp_field_load(Node* castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist);
void reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist, Unique_Node_List &reducible_merges);
void reduce_phi_on_castpp_field_load(Node* castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist, Unique_Node_List &reducible_merges);
void reduce_phi_on_cmp(Node* cmp);
bool reduce_phi_on_safepoints(PhiNode* ophi);
bool reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints);
void reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist);
void reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist, Unique_Node_List &reducible_merges);

void set_not_scalar_replaceable(PointsToNode* ptn NOT_PRODUCT(COMMA const char* reason)) const {
#ifndef PRODUCT