Skip to content

Commit

Permalink
Refine Symbol::isVolatile tests
Browse files Browse the repository at this point in the history
With the expansion of possible memory ordering semantics from binary volatile
or non-volatile to volatile, acquire/release, opaque, and transparent, all
tests of whether a symbol is volatile need to be refined depending on the
intention of the test, i.e. is it testing whether the symbol is strictly
volatile, simply opaque, or somewhere in between?

Signed-off-by: Spencer Comin <[email protected]>
  • Loading branch information
Spencer-Comin committed Dec 11, 2024
1 parent 54a1183 commit 009393b
Show file tree
Hide file tree
Showing 32 changed files with 138 additions and 165 deletions.
27 changes: 8 additions & 19 deletions compiler/aarch64/codegen/OMRTreeEvaluator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6069,7 +6069,8 @@ TR::Register *commonLoadEvaluator(TR::Node *node, TR::InstOpCode::Mnemonic op, i

TR::Register *commonLoadEvaluator(TR::Node *node, TR::InstOpCode::Mnemonic op, int32_t size, TR::Register *targetReg, TR::CodeGenerator *cg)
{
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
TR::Symbol *sym = node->getSymbolReference()->getSymbol();
bool needSync = cg->comp()->target().isSMP() && sym->isAtLeastOrStrongerThanAcquireRelease();

node->setRegister(targetReg);
TR::MemoryReference *tempMR = TR::MemoryReference::createWithRootLoadOrStore(cg, node);
Expand Down Expand Up @@ -6142,7 +6143,8 @@ OMR::ARM64::TreeEvaluator::aloadEvaluator(TR::Node *node, TR::CodeGenerator *cg)
TR::TreeEvaluator::generateVFTMaskInstruction(cg, node, tempReg);
}

bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
TR::Symbol *sym = node->getSymbolReference()->getSymbol();
bool needSync = cg->comp()->target().isSMP() && sym->isAtLeastOrStrongerThanAcquireRelease();
if (needSync)
{
generateSynchronizationInstruction(cg, TR::InstOpCode::dmb, node, TR::InstOpCode::ishld);
Expand Down Expand Up @@ -6192,16 +6194,7 @@ TR::Register *commonStoreEvaluator(TR::Node *node, TR::InstOpCode::Mnemonic op,
{
TR::MemoryReference *tempMR = TR::MemoryReference::createWithRootLoadOrStore(cg, node);
tempMR->validateImmediateOffsetAlignment(node, size, cg);

bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
bool lazyVolatile = false;
if (node->getSymbolReference()->getSymbol()->isShadow() &&
node->getSymbolReference()->getSymbol()->isAcquireRelease() && cg->comp()->target().isSMP())
{
needSync = true;
lazyVolatile = true;
}

TR::Symbol *sym = node->getSymbolReference()->getSymbol();
TR::Node *valueChild;

if (node->getOpCode().isIndirect())
Expand All @@ -6213,7 +6206,7 @@ TR::Register *commonStoreEvaluator(TR::Node *node, TR::InstOpCode::Mnemonic op,
valueChild = node->getFirstChild();
}

if (needSync)
if (cg->comp()->target().isSMP() && sym->isAtLeastOrStrongerThanAcquireRelease())
{
generateSynchronizationInstruction(cg, TR::InstOpCode::dmb, node, TR::InstOpCode::ishst);
}
Expand Down Expand Up @@ -6268,13 +6261,9 @@ TR::Register *commonStoreEvaluator(TR::Node *node, TR::InstOpCode::Mnemonic op,
generateMemSrc1Instruction(cg, op, node, tempMR, cg->evaluate(valueChild));
}

if (needSync)
if (cg->comp()->target().isSMP() && sym->isVolatile())
{
// ordered and lazySet operations will not generate a post-write sync
if (!lazyVolatile)
{
generateSynchronizationInstruction(cg, TR::InstOpCode::dmb, node, TR::InstOpCode::ish);
}
generateSynchronizationInstruction(cg, TR::InstOpCode::dmb, node, TR::InstOpCode::ish);
}

if (valueChildRoot != NULL)
Expand Down
34 changes: 20 additions & 14 deletions compiler/arm/codegen/FPTreeEvaluator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1202,7 +1202,8 @@ TR::Register *OMR::ARM::TreeEvaluator::floadEvaluator(TR::Node *node, TR::CodeGe
trgReg = floatTrgReg;
}

if (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
if (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() &&
cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
{
generateInstruction(cg, (cg->comp()->target().cpu.id() == TR_ARMv6) ? TR::InstOpCode::dmb_v6 : TR::InstOpCode::dmb, node);
}
Expand Down Expand Up @@ -1249,7 +1250,8 @@ TR::Register *OMR::ARM::TreeEvaluator::dloadEvaluator(TR::Node *node, TR::CodeGe
trgReg = doubleTrgReg;
}

if (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
if (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() &&
cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
{
generateInstruction(cg, (cg->comp()->target().cpu.id() == TR_ARMv6) ? TR::InstOpCode::dmb_v6 : TR::InstOpCode::dmb, node);
}
Expand All @@ -1264,7 +1266,8 @@ TR::Register *OMR::ARM::TreeEvaluator::fstoreEvaluator(TR::Node *node, TR::CodeG
TR::Register *sourceReg = cg->evaluate(firstChild);
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(node, 4, cg);

if (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
if (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() &&
cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
{
generateInstruction(cg, (cg->comp()->target().cpu.id() == TR_ARMv6) ? TR::InstOpCode::dmb_v6 : TR::InstOpCode::dmb, node);
}
Expand Down Expand Up @@ -1320,7 +1323,8 @@ TR::Register *OMR::ARM::TreeEvaluator::dstoreEvaluator(TR::Node *node, TR::CodeG
TR::Register *sourceReg = cg->evaluate(firstChild);
bool isUnresolved = node->getSymbolReference()->isUnresolved();

if (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
if (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() &&
cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
{
generateInstruction(cg, (cg->comp()->target().cpu.id() == TR_ARMv6) ? TR::InstOpCode::dmb_v6 : TR::InstOpCode::dmb, node);
}
Expand Down Expand Up @@ -1391,7 +1395,8 @@ TR::Register *OMR::ARM::TreeEvaluator::fstoreiEvaluator(TR::Node *node, TR::Code
TR::Register *sourceReg = cg->evaluate(secondChild);
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(node, 4, cg);

if (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
if (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() &&
cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
{
generateInstruction(cg, (cg->comp()->target().cpu.id() == TR_ARMv6) ? TR::InstOpCode::dmb_v6 : TR::InstOpCode::dmb, node);
}
Expand Down Expand Up @@ -1445,7 +1450,8 @@ TR::Register *OMR::ARM::TreeEvaluator::dstoreiEvaluator(TR::Node *node, TR::Code
TR::Node *secondChild = node->getSecondChild();
TR::Register *sourceReg = cg->evaluate(secondChild);

if (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
if (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() &&
cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
{
generateInstruction(cg, (cg->comp()->target().cpu.id() == TR_ARMv6) ? TR::InstOpCode::dmb_v6 : TR::InstOpCode::dmb, node);
}
Expand Down Expand Up @@ -1882,7 +1888,7 @@ TR::Register *OMR::ARM::TreeEvaluator::i2fEvaluator(TR::Node *node, TR::CodeGene
(firstChild->getOpCodeValue() == TR::iload || firstChild->getOpCodeValue() == TR::iloadi) &&
(firstChild->getNumChildren() > 0) &&
(firstChild->getFirstChild()->getNumChildren() == 1) &&
!(firstChild->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP()))
!(firstChild->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP()))
{
// Coming from memory, last use. Use flds to save the move
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(firstChild, 4, cg);
Expand Down Expand Up @@ -1935,7 +1941,7 @@ TR::Register *OMR::ARM::TreeEvaluator::i2dEvaluator(TR::Node *node, TR::CodeGene
(firstChild->getOpCodeValue() == TR::iload || firstChild->getOpCodeValue() == TR::iloadi) &&
(firstChild->getNumChildren() > 0) &&
(firstChild->getFirstChild()->getNumChildren() == 1) &&
!(firstChild->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP()))
!(firstChild->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP()))
{
// Coming from memory, last use
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(firstChild, 4, cg);
Expand Down Expand Up @@ -2007,7 +2013,7 @@ TR::Register *OMR::ARM::TreeEvaluator::f2dEvaluator(TR::Node *node, TR::CodeGene
(firstChild->getOpCodeValue() == TR::fload || firstChild->getOpCodeValue() == TR::floadi) &&
(firstChild->getNumChildren() > 0) &&
(firstChild->getFirstChild()->getNumChildren() == 1) &&
!(firstChild->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP()))
!(firstChild->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP()))
{
// Coming from memory, last use
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(firstChild, 4, cg);
Expand Down Expand Up @@ -2062,7 +2068,7 @@ TR::Register *OMR::ARM::TreeEvaluator::f2iEvaluator(TR::Node *node, TR::CodeGene
(firstChild->getOpCodeValue() == TR::fload || firstChild->getOpCodeValue() == TR::floadi) &&
(firstChild->getNumChildren() > 0) &&
(firstChild->getFirstChild()->getNumChildren() == 1) &&
!(firstChild->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP()))
!(firstChild->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP()))
{
// Coming from memory, last use
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(firstChild, 4, cg);
Expand Down Expand Up @@ -2102,7 +2108,7 @@ TR::Register *OMR::ARM::TreeEvaluator::d2iEvaluator(TR::Node *node, TR::CodeGene
(firstChild->getOpCodeValue() == TR::dload || firstChild->getOpCodeValue() == TR::dloadi) &&
(firstChild->getNumChildren() > 0) &&
(firstChild->getFirstChild()->getNumChildren() == 1) &&
!(firstChild->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP()))
!(firstChild->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP()))
{
// Coming from memory, last use
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(firstChild, 4, cg);
Expand Down Expand Up @@ -2175,7 +2181,7 @@ TR::Register *OMR::ARM::TreeEvaluator::d2fEvaluator(TR::Node *node, TR::CodeGene
(firstChild->getOpCodeValue() == TR::dload || firstChild->getOpCodeValue() == TR::dloadi) &&
(firstChild->getNumChildren() > 0) &&
(firstChild->getFirstChild()->getNumChildren() == 1) &&
!(firstChild->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP()))
!(firstChild->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP()))
{
// Coming from memory, last use
TR::Register *tempReg = cg->allocateRegister(TR_FPR);
Expand Down Expand Up @@ -3140,12 +3146,12 @@ TR::Register *OMR::ARM::TreeEvaluator::fRegStoreEvaluator(TR::Node *node, TR::Co

// Evaluator for the fmax opcode on 32-bit ARM.
// Not implemented on this target: returns NULL so the caller falls back to
// whatever handling it has for unsupported evaluators.
TR::Register *OMR::ARM::TreeEvaluator::fmaxEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   // Fix: the block previously contained a duplicated, unreachable
   // `return NULL;` statement; only one is needed.
   return NULL;
   }

// Evaluator for the fmin opcode on 32-bit ARM.
// Not implemented on this target: returns NULL so the caller falls back to
// whatever handling it has for unsupported evaluators.
TR::Register *OMR::ARM::TreeEvaluator::fminEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   // Fix: the block previously contained a duplicated, unreachable
   // `return NULL;` statement; only one is needed.
   return NULL;
   }

#endif
20 changes: 10 additions & 10 deletions compiler/arm/codegen/OMRTreeEvaluator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3173,7 +3173,7 @@ TR::Instruction *loadAddressConstant(TR::CodeGenerator *cg, TR::Node * node, int
TR::Register *OMR::ARM::TreeEvaluator::iloadEvaluator(TR::Node *node, TR::CodeGenerator *cg)
{
TR::Register *tempReg;
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
bool needSync = node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP();

tempReg = cg->allocateRegister();
if (node->getSymbolReference()->getSymbol()->isInternalPointer())
Expand Down Expand Up @@ -3278,7 +3278,7 @@ TR::Register *OMR::ARM::TreeEvaluator::aloadEvaluator(TR::Node *node, TR::CodeGe
}

TR::Register *tempReg;
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
bool needSync = (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP());

if (!node->getSymbolReference()->getSymbol()->isInternalPointer())
{
Expand Down Expand Up @@ -3322,7 +3322,7 @@ TR::Register *OMR::ARM::TreeEvaluator::lloadEvaluator(TR::Node *node, TR::CodeGe
TR::RegisterPair *trgReg = cg->allocateRegisterPair(lowReg, highReg);
TR::Compilation *comp = cg->comp();
bool bigEndian = cg->comp()->target().cpu.isBigEndian();
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
bool needSync = (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP());

if (needSync && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
{
Expand Down Expand Up @@ -3374,7 +3374,7 @@ TR::Register *OMR::ARM::TreeEvaluator::commonLoadEvaluator(TR::Node *node, TR::
{
TR::Register *tempReg = node->setRegister(cg->allocateRegister());
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(node, memSize, cg);
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
bool needSync = (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP());
generateTrg1MemInstruction(cg, memToRegOp, node, tempReg, tempMR);
if (needSync && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
{
Expand All @@ -3392,7 +3392,7 @@ TR::Register *OMR::ARM::TreeEvaluator::awrtbarEvaluator(TR::Node *node, TR::Code
TR::Node *firstChild = node->getFirstChild();
TR::Register *sourceRegister;
bool killSource = false;
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
bool needSync = (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP());

if (firstChild->getReferenceCount() > 1 && firstChild->getRegister() != NULL)
{
Expand Down Expand Up @@ -3442,7 +3442,7 @@ TR::Register *OMR::ARM::TreeEvaluator::awrtbariEvaluator(TR::Node *node, TR::Cod
TR::Node *secondChild = node->getSecondChild();
TR::Register *sourceRegister;
bool killSource = false;
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
bool needSync = (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP());

/* comp->useCompressedPointers() is false for 32bit environment, leaving the compressed pointer support unimplemented. */
if (secondChild->getReferenceCount() > 1 && secondChild->getRegister() != NULL)
Expand Down Expand Up @@ -3499,7 +3499,7 @@ TR::Register *OMR::ARM::TreeEvaluator::lstoreEvaluator(TR::Node *node, TR::CodeG
valueChild = node->getFirstChild();
}
bool bigEndian = cg->comp()->target().cpu.isBigEndian();
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
bool needSync = (node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease() && cg->comp()->target().isSMP());
TR::Register *valueReg = cg->evaluate(valueChild);

if (needSync && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
Expand Down Expand Up @@ -3564,7 +3564,7 @@ TR::Register *OMR::ARM::TreeEvaluator::istoreEvaluator(TR::Node *node, TR::CodeG
TR::Register *OMR::ARM::TreeEvaluator::commonStoreEvaluator(TR::Node *node, TR::InstOpCode::Mnemonic memToRegOp, int32_t memSize, TR::CodeGenerator *cg)
{
TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(node, memSize, cg);
bool needSync = (node->getSymbolReference()->getSymbol()->isSyncVolatile() && cg->comp()->target().isSMP());
const bool supportsDMB = (cg->comp()->target().isSMP() && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor);
TR::Node *valueChild;
if (node->getOpCode().isIndirect())
{
Expand All @@ -3575,12 +3575,12 @@ TR::Register *OMR::ARM::TreeEvaluator::commonStoreEvaluator(TR::Node *node, TR::
valueChild = node->getFirstChild();
}

if (needSync && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
if (supportsDMB && node->getSymbolReference()->getSymbol()->isAtLeastOrStrongerThanAcquireRelease())
{
generateInstruction(cg, (cg->comp()->target().cpu.id() == TR_ARMv6) ? TR::InstOpCode::dmb_v6 : TR::InstOpCode::dmb_st, node);
}
generateMemSrc1Instruction(cg, memToRegOp, node, tempMR, cg->evaluate(valueChild));
if (needSync && cg->comp()->target().cpu.id() != TR_DefaultARMProcessor)
if (supportsDMB && node->getSymbolReference()->getSymbol()->isVolatile())
{
generateInstruction(cg, (cg->comp()->target().cpu.id() == TR_ARMv6) ? TR::InstOpCode::dmb_v6 : TR::InstOpCode::dmb, node);
}
Expand Down
Loading

0 comments on commit 009393b

Please sign in to comment.