GitHub Repository: PojavLauncherTeam/openj9
Path: blob/master/runtime/compiler/p/codegen/PPCJNILinkage.cpp
1
/*******************************************************************************
2
* Copyright (c) 2000, 2020 IBM Corp. and others
3
*
4
* This program and the accompanying materials are made available under
5
* the terms of the Eclipse Public License 2.0 which accompanies this
6
* distribution and is available at https://www.eclipse.org/legal/epl-2.0/
7
* or the Apache License, Version 2.0 which accompanies this distribution and
8
* is available at https://www.apache.org/licenses/LICENSE-2.0.
9
*
10
* This Source Code may also be made available under the following
11
* Secondary Licenses when the conditions for such availability set
12
* forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
13
* General Public License, version 2 with the GNU Classpath
14
* Exception [1] and GNU General Public License, version 2 with the
15
* OpenJDK Assembly Exception [2].
16
*
17
* [1] https://www.gnu.org/software/classpath/license.html
18
* [2] http://openjdk.java.net/legal/assembly-exception.html
19
*
20
* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
21
*******************************************************************************/
22
23
#include "codegen/PPCJNILinkage.hpp"
24
25
#include "codegen/CodeGenerator.hpp"
26
#include "codegen/CodeGeneratorUtils.hpp"
27
#include "codegen/Machine.hpp"
28
#include "codegen/Linkage_inlines.hpp"
29
#include "codegen/LiveRegister.hpp"
30
#include "codegen/RealRegister.hpp"
31
#include "codegen/Register.hpp"
32
#include "codegen/RegisterDependency.hpp"
33
#include "codegen/TreeEvaluator.hpp"
34
#include "compile/ResolvedMethod.hpp"
35
#include "env/CHTable.hpp"
36
#include "env/CompilerEnv.hpp"
37
#include "env/VMJ9.h"
38
#include "env/jittypes.h"
39
#include "il/LabelSymbol.hpp"
40
#include "il/Node.hpp"
41
#include "il/Node_inlines.hpp"
42
#include "infra/SimpleRegex.hpp"
43
#include "p/codegen/CallSnippet.hpp"
44
#include "p/codegen/GenerateInstructions.hpp"
45
#include "p/codegen/PPCEvaluator.hpp"
46
#include "p/codegen/PPCHelperCallSnippet.hpp"
47
#include "p/codegen/PPCInstruction.hpp"
48
#include "p/codegen/PPCTableOfConstants.hpp"
49
#include "p/codegen/StackCheckFailureSnippet.hpp"
50
51
J9::Power::JNILinkage::JNILinkage(TR::CodeGenerator *cg)
52
: J9::Power::PrivateLinkage(cg)
53
{
54
//Copy out SystemLinkage properties. Assumes no objects in TR::PPCLinkageProperties.
55
TR::Linkage *sysLinkage = cg->getLinkage(TR_System);
56
const TR::PPCLinkageProperties& sysLinkageProperties = sysLinkage->getProperties();
57
58
_properties = sysLinkageProperties;
59
60
//Set preservedRegisterMapForGC to PrivateLinkage properties.
61
TR::Linkage *privateLinkage = cg->getLinkage(TR_Private);
62
const TR::PPCLinkageProperties& privateLinkageProperties = privateLinkage->getProperties();
63
64
_properties._preservedRegisterMapForGC = privateLinkageProperties.getPreservedRegisterMapForGC();
65
66
// TODO: JNI linkage should inherit from system linkage to avoid having to do this copying
67
setOffsetToFirstParm(sysLinkage->getOffsetToFirstParm());
68
}
69
70
int32_t J9::Power::JNILinkage::buildArgs(TR::Node *callNode,
71
TR::RegisterDependencyConditions *dependencies,
72
const TR::PPCLinkageProperties &properties)
73
{
74
TR_ASSERT(0, "Should call J9::Power::JNILinkage::buildJNIArgs instead.");
75
return 0;
76
}
77
78
TR::Register *J9::Power::JNILinkage::buildIndirectDispatch(TR::Node *callNode)
79
{
80
TR_ASSERT(0, "Calling J9::Power::JNILinkage::buildIndirectDispatch does not make sense.");
81
return NULL;
82
}
83
84
void J9::Power::JNILinkage::buildVirtualDispatch(TR::Node *callNode,
85
TR::RegisterDependencyConditions *dependencies,
86
uint32_t sizeOfArguments)
87
{
88
TR_ASSERT(0, "Calling J9::Power::JNILinkage::buildVirtualDispatch does not make sense.");
89
}
90
91
const TR::PPCLinkageProperties& J9::Power::JNILinkage::getProperties()
92
{
93
return _properties;
94
}
95
96
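// Main entry point for a direct native call: marshal arguments, build the JNI call-out frame,
// drop/reacquire VM access, dispatch to the native, fix up the return value, and check for exceptions.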
TR::Register *J9::Power::JNILinkage::buildDirectDispatch(TR::Node *callNode)
97
{
98
bool aix_style_linkage = (comp()->target().isAIX() || (comp()->target().is64Bit() && comp()->target().isLinux()));
99
TR::LabelSymbol *returnLabel = generateLabelSymbol(cg());
100
TR::SymbolReference *callSymRef = callNode->getSymbolReference();
101
TR::MethodSymbol *callSymbol = callSymRef->getSymbol()->castToMethodSymbol();
102
TR::SymbolReference *gpuHelperSymRef;
103
104
TR::ResolvedMethodSymbol *resolvedMethodSymbol;
105
TR_ResolvedMethod *resolvedMethod;
106
bool dropVMAccess;
107
bool isJNIGCPoint;
108
bool killNonVolatileGPRs;
109
bool checkExceptions;
110
bool createJNIFrame;
111
bool tearDownJNIFrame;
112
bool wrapRefs;
113
bool passReceiver;
114
bool passThread;
115
uintptr_t targetAddress;
116
117
bool crc32m1 = (callSymbol->getRecognizedMethod() == TR::java_util_zip_CRC32_update);
118
bool crc32m2 = (callSymbol->getRecognizedMethod() == TR::java_util_zip_CRC32_updateBytes);
119
bool crc32m3 = (callSymbol->getRecognizedMethod() == TR::java_util_zip_CRC32_updateByteBuffer);
120
121
// TODO: How to handle discontiguous array?
122
// The specialCaseJNI shortcut will mangle register dependencies and use system/C dispatch.
123
// The addresses of the optimized helpers in the server process will not necessarily
124
// match the client-side addresses, so we can't take this shortcut in JITServer mode.
125
bool specialCaseJNI = (crc32m1 || crc32m2 || crc32m3) && !comp()->requiresSpineChecks();
126
127
#ifdef J9VM_OPT_JITSERVER
128
specialCaseJNI = specialCaseJNI && !comp()->isOutOfProcessCompilation();
129
#endif
130
131
bool isGPUHelper = callSymbol->isHelper() && (callSymRef->getReferenceNumber() == TR_estimateGPU ||
132
callSymRef->getReferenceNumber() == TR_getStateGPU ||
133
callSymRef->getReferenceNumber() == TR_regionEntryGPU ||
134
callSymRef->getReferenceNumber() == TR_allocateGPUKernelParms ||
135
callSymRef->getReferenceNumber() == TR_copyToGPU ||
136
callSymRef->getReferenceNumber() == TR_launchGPUKernel ||
137
callSymRef->getReferenceNumber() == TR_copyFromGPU ||
138
callSymRef->getReferenceNumber() == TR_invalidateGPU ||
139
callSymRef->getReferenceNumber() == TR_regionExitGPU ||
140
callSymRef->getReferenceNumber() == TR_flushGPU);
141
142
143
static bool keepVMDuringGPUHelper = feGetEnv("TR_KeepVMDuringGPUHelper") ? true : false;
144
145
TR_J9VMBase *fej9 = (TR_J9VMBase *)(fe());
146
if (!isGPUHelper)
147
{
148
resolvedMethodSymbol = callNode->getSymbol()->castToResolvedMethodSymbol();
149
resolvedMethod = resolvedMethodSymbol->getResolvedMethod();
150
dropVMAccess = !fej9->jniRetainVMAccess(resolvedMethod);
151
isJNIGCPoint = !fej9->jniNoGCPoint(resolvedMethod);
152
killNonVolatileGPRs = isJNIGCPoint;
153
checkExceptions = !fej9->jniNoExceptionsThrown(resolvedMethod);
154
createJNIFrame = !fej9->jniNoNativeMethodFrame(resolvedMethod);
155
tearDownJNIFrame = !fej9->jniNoSpecialTeardown(resolvedMethod);
156
wrapRefs = !fej9->jniDoNotWrapObjects(resolvedMethod);
157
passReceiver = !fej9->jniDoNotPassReceiver(resolvedMethod);
158
passThread = !fej9->jniDoNotPassThread(resolvedMethod);
159
targetAddress = (uintptr_t)resolvedMethod->startAddressForJNIMethod(comp());
160
}
161
else
162
{
163
gpuHelperSymRef = comp()->getSymRefTab()->methodSymRefFromName(comp()->getMethodSymbol(), "com/ibm/jit/JITHelpers", "GPUHelper", "()V", TR::MethodSymbol::Static);
164
resolvedMethodSymbol = gpuHelperSymRef->getSymbol()->castToResolvedMethodSymbol();
165
resolvedMethod = resolvedMethodSymbol->getResolvedMethod();
166
167
if (keepVMDuringGPUHelper || (callSymRef->getReferenceNumber() == TR_copyToGPU || callSymRef->getReferenceNumber() == TR_copyFromGPU) || callSymRef->getReferenceNumber() == TR_regionExitGPU || callSymRef->getReferenceNumber() == TR_flushGPU)
168
dropVMAccess = false; //TR_copyToGPU, TR_copyFromGPU, TR_regionExitGPU, TR_flushGPU (and all others if keepVMDuringGPUHelper is true)
169
else
170
dropVMAccess = true; //TR_regionEntryGPU, TR_launchGPUKernel, TR_estimateGPU, TR_allocateGPUKernelParms, (only if keepVMDuringGPUHelper is false)
171
172
isJNIGCPoint = true;
173
killNonVolatileGPRs = isJNIGCPoint;
174
checkExceptions = false;
175
createJNIFrame = true;
176
tearDownJNIFrame = true;
177
wrapRefs = false; //unused for this code path
178
passReceiver = true;
179
passThread = false;
180
targetAddress = (uintptr_t)callSymbol->getMethodAddress();
181
}
182
183
if (!isGPUHelper && (callSymbol->isPureFunction() || resolvedMethodSymbol->canDirectNativeCall() || specialCaseJNI))
184
{
185
dropVMAccess = false;
186
killNonVolatileGPRs = false;
187
isJNIGCPoint = false;
188
checkExceptions = false;
189
createJNIFrame = false;
190
tearDownJNIFrame = false;
191
if (specialCaseJNI)
192
{
193
wrapRefs = false;
194
passReceiver = false;
195
passThread = false;
196
}
197
}
198
199
TR::Instruction *gcPoint;
200
TR::Register *returnRegister = NULL;
201
TR::RealRegister *stackPtr = cg()->getStackPointerRegister();
202
TR::RealRegister *metaReg = cg()->getMethodMetaDataRegister();
203
TR::Register *gr2Reg, *gr30Reg, *gr31Reg;
204
int32_t argSize;
205
intptr_t aValue;
206
207
TR::RegisterDependencyConditions *deps = new (trHeapMemory()) TR::RegisterDependencyConditions(104,104, trMemory());
208
const TR::PPCLinkageProperties& jniLinkageProperties = getProperties();
209
210
if (killNonVolatileGPRs || dropVMAccess || checkExceptions || tearDownJNIFrame)
211
{
212
gr30Reg = cg()->allocateRegister();
213
gr31Reg = cg()->allocateRegister();
214
TR::addDependency(deps, gr30Reg, TR::RealRegister::gr30, TR_GPR, cg());
215
TR::addDependency(deps, gr31Reg, TR::RealRegister::gr31, TR_GPR, cg());
216
}
217
218
if (killNonVolatileGPRs)
219
{
220
// We need to kill all the non-volatiles so that they'll be in a stack frame in case
221
// gc needs to find them.
222
if (comp()->target().is64Bit())
223
{
224
if (comp()->target().cpu.isAtLeast(OMR_PROCESSOR_PPC_P10))
225
TR::addDependency(deps, NULL, TR::RealRegister::gr16, TR_GPR, cg());
226
}
227
else
228
{
229
// gr15 and gr16 are reserved in 64-bit, normal non-volatile in 32-bit
230
TR::addDependency(deps, NULL, TR::RealRegister::gr15, TR_GPR, cg());
231
TR::addDependency(deps, NULL, TR::RealRegister::gr16, TR_GPR, cg());
232
}
233
TR::addDependency(deps, NULL, TR::RealRegister::gr17, TR_GPR, cg());
234
TR::addDependency(deps, NULL, TR::RealRegister::gr18, TR_GPR, cg());
235
TR::addDependency(deps, NULL, TR::RealRegister::gr19, TR_GPR, cg());
236
TR::addDependency(deps, NULL, TR::RealRegister::gr20, TR_GPR, cg());
237
TR::addDependency(deps, NULL, TR::RealRegister::gr21, TR_GPR, cg());
238
TR::addDependency(deps, NULL, TR::RealRegister::gr22, TR_GPR, cg());
239
TR::addDependency(deps, NULL, TR::RealRegister::gr23, TR_GPR, cg());
240
TR::addDependency(deps, NULL, TR::RealRegister::gr24, TR_GPR, cg());
241
TR::addDependency(deps, NULL, TR::RealRegister::gr25, TR_GPR, cg());
242
TR::addDependency(deps, NULL, TR::RealRegister::gr26, TR_GPR, cg());
243
TR::addDependency(deps, NULL, TR::RealRegister::gr27, TR_GPR, cg());
244
#ifndef J9VM_INTERP_ATOMIC_FREE_JNI
245
if (!dropVMAccess)
246
#endif /* J9VM_INTERP_ATOMIC_FREE_JNI */
247
{
248
TR::addDependency(deps, NULL, TR::RealRegister::gr28, TR_GPR, cg());
249
TR::addDependency(deps, NULL, TR::RealRegister::gr29, TR_GPR, cg());
250
}
251
}
252
253
cg()->machine()->setLinkRegisterKilled(true);
254
cg()->setHasCall();
255
256
argSize = buildJNIArgs(callNode, deps, jniLinkageProperties, specialCaseJNI?false:true, passReceiver, passThread);
257
258
if (aix_style_linkage)
259
{
260
if (specialCaseJNI)
261
gr2Reg = deps->searchPreConditionRegister(TR::RealRegister::gr2);
262
else
263
{
264
gr2Reg = cg()->allocateRegister();
265
TR::addDependency(deps, gr2Reg, TR::RealRegister::gr2, TR_GPR, cg());
266
}
267
}
268
269
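// Recognized CRC32 natives bypass the JNI stub: call the optimized C helper directly and, for the
// byte-array/ByteBuffer variants, fold the offset into the address argument and remap the arguments in gr4-gr7.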
if (specialCaseJNI)
270
{
271
// No argument change is needed
272
if (crc32m1)
273
{
274
targetAddress = (uintptr_t)crc32_oneByte;
275
}
276
277
// Argument changes are needed
278
if (crc32m2 || crc32m3)
279
{
280
targetAddress = (uintptr_t)((comp()->target().cpu.isAtLeast(OMR_PROCESSOR_PPC_P8) && comp()->target().cpu.supportsFeature(OMR_FEATURE_PPC_HAS_VSX))?crc32_vpmsum:crc32_no_vpmsum);
281
282
// Assuming pre/postCondition have the same index, we use preCondition to map
283
OMR::RegisterDependencyMap map(deps->getPreConditions()->getRegisterDependency(0), deps->getAddCursorForPre());
284
for (int32_t cnt=0; cnt < deps->getAddCursorForPre(); cnt++)
285
map.addDependency(deps->getPreConditions()->getRegisterDependency(cnt), cnt);
286
287
TR::Register *addrArg, *posArg, *lenArg, *wasteArg;
288
if (crc32m2)
289
{
290
addrArg = map.getSourceWithTarget(TR::RealRegister::gr4);
291
posArg = map.getSourceWithTarget(TR::RealRegister::gr5);
292
lenArg = map.getSourceWithTarget(TR::RealRegister::gr6);
293
294
generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::addi2, callNode, addrArg, addrArg, TR::Compiler->om.contiguousArrayHeaderSizeInBytes());
295
}
296
297
if (crc32m3)
298
{
299
addrArg = map.getSourceWithTarget(comp()->target().is64Bit()?(TR::RealRegister::gr4):(TR::RealRegister::gr5));
300
posArg = map.getSourceWithTarget(comp()->target().is64Bit()?(TR::RealRegister::gr5):(TR::RealRegister::gr6));
301
lenArg = map.getSourceWithTarget(comp()->target().is64Bit()?(TR::RealRegister::gr6):(TR::RealRegister::gr7));
302
if (!comp()->target().is64Bit())
303
wasteArg = map.getSourceWithTarget(TR::RealRegister::gr4);
304
}
305
generateTrg1Src2Instruction(cg(), TR::InstOpCode::add, callNode, addrArg, addrArg, posArg);
306
307
deps->getPreConditions()->setDependencyInfo(map.getTargetIndex(TR::RealRegister::gr4), addrArg, TR::RealRegister::gr4, UsesDependentRegister);
308
deps->getPostConditions()->setDependencyInfo(map.getTargetIndex(TR::RealRegister::gr4), addrArg, TR::RealRegister::gr4, UsesDependentRegister);
309
310
deps->getPreConditions()->setDependencyInfo(map.getTargetIndex(TR::RealRegister::gr5), lenArg, TR::RealRegister::gr5, UsesDependentRegister);
311
deps->getPostConditions()->setDependencyInfo(map.getTargetIndex(TR::RealRegister::gr5), lenArg, TR::RealRegister::gr5, UsesDependentRegister);
312
313
deps->getPreConditions()->setDependencyInfo(map.getTargetIndex(TR::RealRegister::gr6), posArg, TR::RealRegister::gr6, UsesDependentRegister);
314
deps->getPostConditions()->setDependencyInfo(map.getTargetIndex(TR::RealRegister::gr6), posArg, TR::RealRegister::gr6, UsesDependentRegister);
315
316
if (crc32m3 && !comp()->target().is64Bit())
317
{
318
deps->getPreConditions()->setDependencyInfo(map.getTargetIndex(TR::RealRegister::gr7), wasteArg, TR::RealRegister::gr7, UsesDependentRegister);
319
deps->getPostConditions()->setDependencyInfo(map.getTargetIndex(TR::RealRegister::gr7), wasteArg, TR::RealRegister::gr7, UsesDependentRegister);
320
}
321
}
322
}
323
324
TR::Register *gr0Reg = deps->searchPreConditionRegister(TR::RealRegister::gr0);
325
TR::Register *gr3Reg = deps->searchPreConditionRegister(TR::RealRegister::gr3);
326
TR::Register *gr11Reg = deps->searchPreConditionRegister(TR::RealRegister::gr11);
327
TR::Register *gr12Reg = deps->searchPreConditionRegister(TR::RealRegister::gr12);
328
TR::Register *cr0Reg = deps->searchPreConditionRegister(TR::RealRegister::cr0);
329
TR::Register *lowReg = NULL, *highReg;
330
331
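// Select the return register(s) by call opcode: gr3 (plus gr4 for 32-bit lcall) for integer and
// address results, the linkage's float return register for fcall/dcall, and none for a void call.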
switch (callNode->getOpCodeValue())
332
{
333
case TR::icall:
334
case TR::acall:
335
if (callNode->getDataType() == TR::Address)
336
{
337
if (!gr3Reg)
338
{
339
gr3Reg = cg()->allocateRegister();
340
returnRegister = cg()->allocateCollectedReferenceRegister();
341
deps->addPreCondition(gr3Reg, TR::RealRegister::gr3);
342
deps->addPostCondition(returnRegister, TR::RealRegister::gr3);
343
}
344
else
345
{
346
returnRegister = deps->searchPostConditionRegister(TR::RealRegister::gr3);
347
}
348
}
349
else
350
{
351
if (!gr3Reg)
352
{
353
gr3Reg = cg()->allocateRegister();
354
returnRegister = gr3Reg;
355
TR::addDependency(deps, gr3Reg, TR::RealRegister::gr3, TR_GPR, cg());
356
}
357
else
358
{
359
returnRegister = deps->searchPostConditionRegister(TR::RealRegister::gr3);
360
}
361
}
362
break;
363
case TR::lcall:
364
if (comp()->target().is64Bit())
365
{
366
if (!gr3Reg)
367
{
368
gr3Reg = cg()->allocateRegister();
369
returnRegister = gr3Reg;
370
TR::addDependency(deps, gr3Reg, TR::RealRegister::gr3, TR_GPR, cg());
371
}
372
else
373
{
374
returnRegister = deps->searchPostConditionRegister(TR::RealRegister::gr3);
375
}
376
}
377
else
378
{
379
if (!gr3Reg)
380
{
381
gr3Reg = cg()->allocateRegister();
382
TR::addDependency(deps, gr3Reg, TR::RealRegister::gr3, TR_GPR, cg());
383
highReg = gr3Reg;
384
}
385
else
386
{
387
highReg = deps->searchPostConditionRegister(TR::RealRegister::gr3);
388
}
389
lowReg = deps->searchPostConditionRegister(TR::RealRegister::gr4);
390
returnRegister = cg()->allocateRegisterPair(lowReg, highReg);
391
}
392
break;
393
case TR::fcall:
394
case TR::dcall:
395
returnRegister = deps->searchPostConditionRegister(jniLinkageProperties.getFloatReturnRegister());
396
if (!gr3Reg)
397
{
398
gr3Reg = cg()->allocateRegister();
399
TR::addDependency(deps, gr3Reg, TR::RealRegister::gr3, TR_GPR, cg());
400
}
401
break;
402
case TR::call:
403
if (!gr3Reg)
404
{
405
gr3Reg = cg()->allocateRegister();
406
TR::addDependency(deps, gr3Reg, TR::RealRegister::gr3, TR_GPR, cg());
407
}
408
returnRegister = NULL;
409
break;
410
default:
411
if (!gr3Reg)
412
{
413
gr3Reg = cg()->allocateRegister();
414
TR::addDependency(deps, gr3Reg, TR::RealRegister::gr3, TR_GPR, cg());
415
}
416
returnRegister = NULL;
417
TR_ASSERT( false, "Unknown direct call Opcode.");
418
}
419
420
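// Build the JNI call-out frame on the Java stack (tag bits, return address, flags, the native's RAM
// method) and publish the new Java SP, PC and literals in the J9VMThread before calling out.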
if (createJNIFrame)
421
{
422
// push tag bits (savedA0)
423
int32_t tagBits = fej9->constJNICallOutFrameSpecialTag();
424
// if the current method is simply a wrapper for the JNI call, hide the call-out stack frame
425
if (resolvedMethod == comp()->getCurrentMethod())
426
tagBits |= fej9->constJNICallOutFrameInvisibleTag();
427
loadConstant(cg(), callNode, tagBits, gr11Reg);
428
loadConstant(cg(), callNode, 0, gr12Reg);
429
430
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_stu, callNode, TR::MemoryReference::createWithDisplacement(cg(), stackPtr, -TR::Compiler->om.sizeofReferenceAddress(), TR::Compiler->om.sizeofReferenceAddress()), gr11Reg);
431
432
// skip savedPC slot (unused) and push return address (savedCP)
433
cg()->fixedLoadLabelAddressIntoReg(callNode, gr11Reg, returnLabel);
434
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_stu, callNode, TR::MemoryReference::createWithDisplacement(cg(), stackPtr, -2*TR::Compiler->om.sizeofReferenceAddress(), TR::Compiler->om.sizeofReferenceAddress()), gr11Reg);
435
436
// begin: mask out the magic bit that indicates JIT frames below
437
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_st, callNode, TR::MemoryReference::createWithDisplacement(cg(), metaReg, fej9->thisThreadGetJavaFrameFlagsOffset(), TR::Compiler->om.sizeofReferenceAddress()), gr12Reg);
438
439
// push flags: use lis instead of a lis/ori pair since this is a constant; saves one instruction
440
aValue = fej9->constJNICallOutFrameFlags();
441
TR_ASSERT_FATAL((aValue & ~0x7FFF0000) == 0, "Length assumption broken.");
442
generateTrg1ImmInstruction(cg(), TR::InstOpCode::lis, callNode, gr11Reg, aValue>>16);
443
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_stu, callNode, TR::MemoryReference::createWithDisplacement(cg(), stackPtr, -TR::Compiler->om.sizeofReferenceAddress(), TR::Compiler->om.sizeofReferenceAddress()),gr11Reg);
444
445
// push the RAM method for the native
446
aValue = (uintptr_t)resolvedMethod->resolvedMethodAddress();
447
// use loadAddressConstantFixed - fixed instruction count 2 32-bit, or 5 64-bit
448
// loadAddressRAM needs a resolved method symbol, so the gpuHelperSymRef is passed in instead of
449
// the callSymRef which does not have a resolved method symbol
450
if (isGPUHelper)
451
callNode->setSymbolReference(gpuHelperSymRef);
452
loadAddressRAM(cg(), callNode, aValue, gr11Reg);
453
if (isGPUHelper)
454
callNode->setSymbolReference(callSymRef); //change back to callSymRef afterwards
455
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_stu, callNode, TR::MemoryReference::createWithDisplacement(cg(), stackPtr, -TR::Compiler->om.sizeofReferenceAddress(), TR::Compiler->om.sizeofReferenceAddress()),gr11Reg);
456
457
// store out jsp
458
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_st, callNode, TR::MemoryReference::createWithDisplacement(cg(), metaReg,fej9->thisThreadGetJavaSPOffset(), TR::Compiler->om.sizeofReferenceAddress()), stackPtr);
459
460
// store out pc and literals values indicating the callout frame
461
aValue = fej9->constJNICallOutFrameType();
462
TR_ASSERT(aValue>=LOWER_IMMED && aValue<=UPPER_IMMED, "Length assumption broken.");
463
loadConstant(cg(), callNode, (int32_t)aValue, gr11Reg);
464
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_st, callNode, TR::MemoryReference::createWithDisplacement(cg(), metaReg,fej9->thisThreadGetJavaPCOffset(), TR::Compiler->om.sizeofReferenceAddress()), gr11Reg);
465
466
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_st, callNode, TR::MemoryReference::createWithDisplacement(cg(), metaReg,fej9->thisThreadGetJavaLiteralsOffset(), TR::Compiler->om.sizeofReferenceAddress()), gr12Reg);
467
}
468
469
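// Pass the vmThread in gr3 as the implicit JNIEnv* argument when the native expects one.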
if (passThread)
470
generateTrg1Src1Instruction(cg(), TR::InstOpCode::mr, callNode, gr3Reg, metaReg);
471
472
// Change VMAccessLength if this code changes
473
if (dropVMAccess)
474
{
475
// At this point: arguments for the native routine are all in place already, i.e., if there are
476
// more than 32 bytes' worth of arguments, some of them are on the stack. However,
477
// we potentially go out to call a helper before jumping to the native. There is
478
// no definite guarantee that the helper call will not trash the stack area concerned.
479
// But GAC did make sure it is safe with the current helper implementation. If this
480
// condition changes in the future, we will need some heroic measure to fix it.
481
// Furthermore, this potential call should not touch FP argument registers. ***
482
#ifdef J9VM_INTERP_ATOMIC_FREE_JNI
483
releaseVMAccessAtomicFree(callNode, deps, metaReg, cr0Reg, gr30Reg, gr31Reg);
484
#else
485
releaseVMAccess(callNode, deps, metaReg, gr12Reg, gr30Reg, gr31Reg);
486
#endif /* J9VM_INTERP_ATOMIC_FREE_JNI */
487
}
488
489
// get the address of the function descriptor
490
// use loadAddressConstantFixed - fixed instruction count 2 32-bit, or 5 64-bit
491
TR::Instruction *current = cg()->getAppendInstruction();
492
if (isGPUHelper)
493
loadConstant(cg(), callNode, (int64_t)targetAddress, gr12Reg);
494
else
495
loadAddressJNI(cg(), callNode, targetAddress, gr12Reg);
496
cg()->getJNICallSites().push_front(new (trHeapMemory()) TR_Pair<TR_ResolvedMethod, TR::Instruction>(resolvedMethod, current->getNext())); // the first instruction generated by loadAddressC...
497
498
if (aix_style_linkage &&
499
!(comp()->target().is64Bit() && comp()->target().isLinux() && comp()->target().cpu.isLittleEndian()))
500
{
501
// get the target address
502
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, callNode, gr0Reg, TR::MemoryReference::createWithDisplacement(cg(), gr12Reg, 0, TR::Compiler->om.sizeofReferenceAddress()));
503
// put the target address into the count register
504
generateSrc1Instruction(cg(), TR::InstOpCode::mtctr, callNode, gr0Reg);
505
// load the toc register
506
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, callNode, gr2Reg, TR::MemoryReference::createWithDisplacement(cg(), gr12Reg, TR::Compiler->om.sizeofReferenceAddress(), TR::Compiler->om.sizeofReferenceAddress()));
507
// load the environment register
508
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, callNode, gr11Reg, TR::MemoryReference::createWithDisplacement(cg(), gr12Reg, 2*TR::Compiler->om.sizeofReferenceAddress(), TR::Compiler->om.sizeofReferenceAddress()));
509
}
510
else
511
{
512
// put the target address into the count register
513
generateSrc1Instruction(cg(), TR::InstOpCode::mtctr, callNode, gr12Reg);
514
}
515
516
// call the JNI function
517
if (isJNIGCPoint)
518
{
519
gcPoint = generateInstruction(cg(), TR::InstOpCode::bctrl, callNode);
520
gcPoint->PPCNeedsGCMap(jniLinkageProperties.getPreservedRegisterMapForGC());
521
}
522
else
523
{
524
generateInstruction(cg(), TR::InstOpCode::bctrl, callNode);
525
}
526
generateDepLabelInstruction(cg(), TR::InstOpCode::label, callNode, returnLabel, deps);
527
528
if (dropVMAccess)
529
{
530
// Again, we depend on the fact that:
531
// 1) this potential call will not trash the in-register return values
532
// 2) we know GPRs are ok, since OTI saves GPRs before use
533
// 3) FPRs are not so sure, pending GAC's verification ***
534
#ifdef J9VM_INTERP_ATOMIC_FREE_JNI
535
acquireVMAccessAtomicFree(callNode, deps, metaReg, cr0Reg, gr30Reg, gr31Reg);
536
#else
537
acquireVMAccess(callNode, deps, metaReg, gr12Reg, gr30Reg, gr31Reg);
538
#endif /* J9VM_INTERP_ATOMIC_FREE_JNI */
539
}
540
541
// jni methods may not return a full register in some cases so need to get the declared
542
// type so that we sign and zero extend the narrower integer return types properly
543
TR::LabelSymbol *tempLabel = generateLabelSymbol(cg());
544
if (!isGPUHelper)
545
{
546
switch (resolvedMethod->returnType())
547
{
548
case TR::Address:
549
if (wrapRefs)
550
{
551
// Unwrap the returned object if non-null
552
generateTrg1Src1ImmInstruction(cg(),TR::InstOpCode::Op_cmpi, callNode, cr0Reg, returnRegister, 0);
553
generateConditionalBranchInstruction(cg(), TR::InstOpCode::beq, callNode, tempLabel, cr0Reg);
554
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, callNode, returnRegister, TR::MemoryReference::createWithDisplacement(cg(), returnRegister, 0, TR::Compiler->om.sizeofReferenceAddress()));
555
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, tempLabel);
556
}
557
break;
558
case TR::Int8:
559
if (comp()->getSymRefTab()->isReturnTypeBool(callSymRef))
560
{
561
// For a bool return type, must check whether the value returned by
562
// JNI is zero (false) or non-zero (true) to yield Java result
563
generateTrg1Src1ImmInstruction(cg(),TR::InstOpCode::Op_cmpi, callNode, cr0Reg, returnRegister, 0);
564
generateTrg1ImmInstruction(cg(), TR::InstOpCode::li, callNode, returnRegister, 1);
565
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, callNode, tempLabel, cr0Reg);
566
generateTrg1ImmInstruction(cg(), TR::InstOpCode::li, callNode, returnRegister, 0);
567
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, tempLabel);
568
}
569
else if (resolvedMethod->returnTypeIsUnsigned())
570
generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::andi_r, callNode, returnRegister, returnRegister, 0xff);
571
else
572
generateTrg1Src1Instruction(cg(), TR::InstOpCode::extsb, callNode, returnRegister, returnRegister);
573
break;
574
case TR::Int16:
575
if (resolvedMethod->returnTypeIsUnsigned())
576
generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::andi_r, callNode, returnRegister, returnRegister, 0xffff);
577
else
578
generateTrg1Src1Instruction(cg(), TR::InstOpCode::extsh, callNode, returnRegister, returnRegister);
579
break;
580
}
581
}
582
583
if (createJNIFrame)
584
{
585
// restore stack pointer: need to deal with growable stack -- stack may already be moved.
586
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, callNode, gr12Reg, TR::MemoryReference::createWithDisplacement(cg(), metaReg, fej9->thisThreadGetJavaLiteralsOffset(), TR::Compiler->om.sizeofReferenceAddress()));
587
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, callNode, stackPtr, TR::MemoryReference::createWithDisplacement(cg(), metaReg,fej9->thisThreadGetJavaSPOffset(), TR::Compiler->om.sizeofReferenceAddress()));
588
generateTrg1Src2Instruction(cg(), TR::InstOpCode::add, callNode, stackPtr, gr12Reg, stackPtr);
589
590
if (tearDownJNIFrame)
591
{
592
// must check to see if the ref pool was used and clean it up if so -- or we
593
// leave a bunch of pinned garbage behind that screws up the gc quality forever
594
// Again, we depend on this call not trashing the return register values, especially FPRs.
595
// Pending GAC's verification. ***
596
uint32_t flagValue = fej9->constJNIReferenceFrameAllocatedFlags();
597
TR::LabelSymbol *refPoolRestartLabel = generateLabelSymbol(cg());
598
TR::SymbolReference *collapseSymRef = cg()->getSymRefTab()->findOrCreateRuntimeHelper(TR_PPCcollapseJNIReferenceFrame);
599
600
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, callNode, gr30Reg, TR::MemoryReference::createWithDisplacement(cg(), stackPtr, fej9->constJNICallOutFrameFlagsOffset(), TR::Compiler->om.sizeofReferenceAddress()));
601
simplifyANDRegImm(callNode, gr31Reg, gr30Reg, flagValue, cg());
602
generateTrg1Src1ImmInstruction(cg(),TR::InstOpCode::Op_cmpi, callNode, cr0Reg, gr31Reg, 0);
603
generateConditionalBranchInstruction(cg(), TR::InstOpCode::beq, callNode, refPoolRestartLabel, cr0Reg);
604
generateDepImmSymInstruction(cg(), TR::InstOpCode::bl, callNode, (uintptr_t)collapseSymRef->getMethodAddress(), new (trHeapMemory()) TR::RegisterDependencyConditions(0,0, trMemory()), collapseSymRef, NULL);
605
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, refPoolRestartLabel);
606
}
607
608
// Restore the JIT frame
609
generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::addi2, callNode, stackPtr, stackPtr, 5*TR::Compiler->om.sizeofReferenceAddress());
610
}
611
612
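// If the native may have thrown, load vmThread->currentException and branch to the
// throwCurrentException helper snippet when it is non-null.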
if (checkExceptions)
613
{
614
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, callNode, gr31Reg, TR::MemoryReference::createWithDisplacement(cg(), metaReg, fej9->thisThreadGetCurrentExceptionOffset(), TR::Compiler->om.sizeofReferenceAddress()));
615
generateTrg1Src1ImmInstruction(cg(),TR::InstOpCode::Op_cmpi, callNode, cr0Reg, gr31Reg, 0);
616
617
TR::SymbolReference *throwSymRef = comp()->getSymRefTab()->findOrCreateThrowCurrentExceptionSymbolRef(comp()->getJittedMethodSymbol());
618
TR::LabelSymbol *exceptionSnippetLabel = cg()->lookUpSnippet(TR::Snippet::IsHelperCall, throwSymRef);
619
if (exceptionSnippetLabel == NULL)
620
{
621
exceptionSnippetLabel = generateLabelSymbol(cg());
622
cg()->addSnippet(new (trHeapMemory()) TR::PPCHelperCallSnippet(cg(), callNode, exceptionSnippetLabel, throwSymRef));
623
}
624
gcPoint = generateConditionalBranchInstruction(cg(), TR::InstOpCode::bnel, callNode, exceptionSnippetLabel, cr0Reg);
625
gcPoint->PPCNeedsGCMap(0x10000000);
626
}
627
628
TR::LabelSymbol *depLabel = generateLabelSymbol(cg());
629
generateDepLabelInstruction(cg(), TR::InstOpCode::label, callNode, depLabel, deps->cloneAndFix(cg()));
630
631
callNode->setRegister(returnRegister);
632
633
cg()->freeAndResetTransientLongs();
634
deps->stopUsingDepRegs(cg(), lowReg == NULL ? returnRegister : highReg, lowReg);
635
636
return returnRegister;
637
}
638
639
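// Fast-path release of VM access: spin on vmThread->publicFlags with larx/stcx., clearing the access
// bits; if any out-of-line bits are set, branch to the releaseVMAccess helper snippet instead.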
// tempReg0 to tempReg2 are temporary registers
640
void J9::Power::JNILinkage::releaseVMAccess(TR::Node* callNode, TR::RegisterDependencyConditions* deps, TR::RealRegister* metaReg, TR::Register* tempReg0, TR::Register* tempReg1, TR::Register* tempReg2)
641
{
642
// Release vm access - use hardware registers because of the control flow
643
TR::Instruction *gcPoint;
644
const TR::PPCLinkageProperties &properties = getProperties();
645
646
TR::Register *gr28Reg = cg()->allocateRegister();
647
TR::Register *gr29Reg = cg()->allocateRegister();
648
TR::Register *cr0Reg = deps->searchPreConditionRegister(TR::RealRegister::cr0);
649
650
TR::addDependency(deps, gr28Reg, TR::RealRegister::gr28, TR_GPR, cg());
651
TR::addDependency(deps, gr29Reg, TR::RealRegister::gr29, TR_GPR, cg());
652
653
intptr_t aValue;
654
655
TR_J9VMBase *fej9 = (TR_J9VMBase *)(fe());
656
aValue = fej9->constReleaseVMAccessOutOfLineMask();
657
TR_ASSERT(aValue<=0x7fffffff, "Value assumption broken.");
658
// use loadIntConstantFixed - fixed instruction count 2 with int32_t argument
659
cg()->loadIntConstantFixed(callNode, (int32_t) aValue, tempReg1);
660
661
aValue = fej9->constReleaseVMAccessMask();
662
// use loadAddressConstantFixed - fixed instruction count 2 32-bit, or 5 64-bit
663
cg()->loadAddressConstantFixed(callNode, aValue, tempReg0, NULL, NULL, -1, false);
664
generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::addi2, callNode, gr28Reg, metaReg,
665
fej9->thisThreadGetPublicFlagsOffset());
666
generateInstruction(cg(), TR::InstOpCode::lwsync, callNode); // This is necessary for the fast path but redundant for the slow path
667
TR::LabelSymbol *loopHead = generateLabelSymbol(cg());
668
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, loopHead);
669
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_larx, callNode, tempReg2, TR::MemoryReference::createWithIndexReg(cg(), NULL, gr28Reg, TR::Compiler->om.sizeofReferenceAddress()));
670
generateTrg1Src2Instruction(cg(), TR::InstOpCode::and_r, callNode, gr29Reg, tempReg2, tempReg1);
671
generateTrg1Src2Instruction(cg(), TR::InstOpCode::AND, callNode, tempReg2, tempReg2, tempReg0);
672
673
TR::LabelSymbol *longReleaseLabel = generateLabelSymbol(cg());
674
TR::LabelSymbol *longReleaseSnippetLabel;
675
TR::LabelSymbol *doneLabel = generateLabelSymbol(cg());
676
TR::SymbolReference *relVMSymRef = comp()->getSymRefTab()->findOrCreateReleaseVMAccessSymbolRef(comp()->getJittedMethodSymbol());
677
longReleaseSnippetLabel = cg()->lookUpSnippet(TR::Snippet::IsHelperCall, relVMSymRef);
678
if (longReleaseSnippetLabel == NULL)
679
{
680
longReleaseSnippetLabel = generateLabelSymbol(cg());
681
cg()->addSnippet(new (trHeapMemory()) TR::PPCHelperCallSnippet(cg(), callNode, longReleaseSnippetLabel, relVMSymRef));
682
}
683
684
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, callNode, longReleaseLabel, cr0Reg);
685
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_stcx_r, callNode, TR::MemoryReference::createWithIndexReg(cg(), NULL, gr28Reg, TR::Compiler->om.sizeofReferenceAddress()), tempReg2);
686
687
if (comp()->target().cpu.isAtLeast(OMR_PROCESSOR_PPC_GP))
688
// use PPC AS branch hint
689
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, PPCOpProp_BranchUnlikely, callNode, loopHead, cr0Reg);
690
else
691
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, callNode, loopHead, cr0Reg);
692
693
generateLabelInstruction(cg(), TR::InstOpCode::b, callNode, doneLabel);
694
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, longReleaseLabel);
695
gcPoint = generateLabelInstruction(cg(), TR::InstOpCode::bl, callNode, longReleaseSnippetLabel);
696
gcPoint->PPCNeedsGCMap(~(properties.getPreservedRegisterMapForGC()));
697
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, doneLabel);
698
// end of release vm access (spin lock)
699
}
700
701
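// Reacquire VM access: spin with larx/stcx., setting the access bit while publicFlags is zero;
// otherwise branch to the acquireVMAccess helper snippet.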
// tempReg0 to tempReg2 are temporary registers
702
void J9::Power::JNILinkage::acquireVMAccess(TR::Node* callNode, TR::RegisterDependencyConditions* deps, TR::RealRegister* metaReg, TR::Register* tempReg0, TR::Register* tempReg1, TR::Register* tempReg2)
703
{
704
// Acquire VM Access
705
TR::Instruction *gcPoint;
706
707
TR::Register *cr0Reg = deps->searchPreConditionRegister(TR::RealRegister::cr0);
708
709
TR_J9VMBase *fej9 = (TR_J9VMBase *)(fe());
710
loadConstant(cg(), callNode, (int32_t)fej9->constAcquireVMAccessOutOfLineMask(), tempReg1);
711
generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::addi2, callNode, tempReg0, metaReg,
712
fej9->thisThreadGetPublicFlagsOffset());
713
TR::LabelSymbol *loopHead2 = generateLabelSymbol(cg());
714
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, loopHead2);
715
generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_larx, PPCOpProp_LoadReserveExclusiveAccess, callNode, tempReg2, TR::MemoryReference::createWithIndexReg(cg(), NULL, tempReg0, TR::Compiler->om.sizeofReferenceAddress()));
716
generateTrg1Src1ImmInstruction(cg(),TR::InstOpCode::Op_cmpi, callNode, cr0Reg, tempReg2, 0);
717
TR::LabelSymbol *longReacquireLabel = generateLabelSymbol(cg());
718
TR::LabelSymbol *longReacquireSnippetLabel;
719
TR::LabelSymbol *doneLabel2 = generateLabelSymbol(cg());
720
TR::SymbolReference *acqVMSymRef = comp()->getSymRefTab()->findOrCreateAcquireVMAccessSymbolRef(comp()->getJittedMethodSymbol());
721
longReacquireSnippetLabel = cg()->lookUpSnippet(TR::Snippet::IsHelperCall, acqVMSymRef);
722
if (longReacquireSnippetLabel == NULL)
723
{
724
longReacquireSnippetLabel = generateLabelSymbol(cg());
725
cg()->addSnippet(new (trHeapMemory()) TR::PPCHelperCallSnippet(cg(), callNode, longReacquireSnippetLabel, acqVMSymRef));
726
}
727
728
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, callNode, longReacquireLabel, cr0Reg);
729
generateMemSrc1Instruction(cg(),TR::InstOpCode::Op_stcx_r, callNode, TR::MemoryReference::createWithIndexReg(cg(), NULL, tempReg0, TR::Compiler->om.sizeofReferenceAddress()), tempReg1);
730
731
if (comp()->target().cpu.isAtLeast(OMR_PROCESSOR_PPC_GP))
732
// use PPC AS branch hint
733
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, PPCOpProp_BranchUnlikely, callNode, loopHead2, cr0Reg);
734
else
735
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, callNode, loopHead2, cr0Reg);
736
737
generateInstruction(cg(), TR::InstOpCode::isync, callNode);
738
generateLabelInstruction(cg(), TR::InstOpCode::b, callNode, doneLabel2);
739
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, longReacquireLabel);
740
gcPoint = generateLabelInstruction(cg(), TR::InstOpCode::bl, callNode, longReacquireSnippetLabel);
741
gcPoint->PPCNeedsGCMap(0x00000000);
742
generateLabelInstruction(cg(), TR::InstOpCode::label, callNode, doneLabel2);
743
// end of reacquire VM Access
744
}
745
746
#ifdef J9VM_INTERP_ATOMIC_FREE_JNI
747
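// Atomic-free JNI release: publish vmThread->inNative = 1, then reload publicFlags and call the
// releaseVMAccess helper unless it holds exactly the VM-access bit.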
void J9::Power::JNILinkage::releaseVMAccessAtomicFree(TR::Node* callNode, TR::RegisterDependencyConditions* deps, TR::RealRegister* metaReg, TR::Register* cr0Reg, TR::Register* tempReg1, TR::Register* tempReg2)
748
{
749
TR_J9VMBase *fej9 = (TR_J9VMBase *)fe();
750
751
#if !defined(J9VM_INTERP_ATOMIC_FREE_JNI_USES_FLUSH)
752
generateInstruction(cg(), TR::InstOpCode::lwsync, callNode);
753
#endif /* !J9VM_INTERP_ATOMIC_FREE_JNI_USES_FLUSH */
754
generateTrg1ImmInstruction(cg(), TR::InstOpCode::li, callNode, tempReg1, 1);
755
generateMemSrc1Instruction(cg(), TR::InstOpCode::Op_st, callNode, TR::MemoryReference::createWithDisplacement(cg(), metaReg, (int32_t)offsetof(struct J9VMThread, inNative), TR::Compiler->om.sizeofReferenceAddress()), tempReg1);
756
#if !defined(J9VM_INTERP_ATOMIC_FREE_JNI_USES_FLUSH)
757
generateInstruction(cg(), TR::InstOpCode::sync, callNode);
758
#endif /* !J9VM_INTERP_ATOMIC_FREE_JNI_USES_FLUSH */
759
generateTrg1MemInstruction(cg(), TR::InstOpCode::Op_load, callNode, tempReg1, TR::MemoryReference::createWithDisplacement(cg(), metaReg, fej9->thisThreadGetPublicFlagsOffset(), TR::Compiler->om.sizeofReferenceAddress()));
760
TR_ASSERT_FATAL(J9_PUBLIC_FLAGS_VM_ACCESS >= LOWER_IMMED && J9_PUBLIC_FLAGS_VM_ACCESS <= UPPER_IMMED, "VM access bit must be immediate");
761
generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::Op_cmpli, callNode, cr0Reg, tempReg1, J9_PUBLIC_FLAGS_VM_ACCESS);
762
763
TR::SymbolReference *jitReleaseVMAccessSymRef = comp()->getSymRefTab()->findOrCreateReleaseVMAccessSymbolRef(comp()->getJittedMethodSymbol());
764
TR::LabelSymbol *releaseVMAccessSnippetLabel = cg()->lookUpSnippet(TR::Snippet::IsHelperCall, jitReleaseVMAccessSymRef);
765
if (!releaseVMAccessSnippetLabel)
766
{
767
releaseVMAccessSnippetLabel = generateLabelSymbol(cg());
768
cg()->addSnippet(new (trHeapMemory()) TR::PPCHelperCallSnippet(cg(), callNode, releaseVMAccessSnippetLabel, jitReleaseVMAccessSymRef));
769
}
770
771
if (comp()->target().cpu.isAtLeast(OMR_PROCESSOR_PPC_GP))
772
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bnel, PPCOpProp_BranchUnlikely, callNode, releaseVMAccessSnippetLabel, cr0Reg);
773
else
774
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bnel, callNode, releaseVMAccessSnippetLabel, cr0Reg);
775
}
776
777
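// Atomic-free JNI reacquire: clear vmThread->inNative, then reload publicFlags and call the
// acquireVMAccess helper unless it holds exactly the VM-access bit.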
void J9::Power::JNILinkage::acquireVMAccessAtomicFree(TR::Node* callNode, TR::RegisterDependencyConditions* deps, TR::RealRegister* metaReg, TR::Register* cr0Reg, TR::Register* tempReg1, TR::Register* tempReg2)
778
{
779
TR_J9VMBase *fej9 = (TR_J9VMBase *)fe();
780
781
generateTrg1ImmInstruction(cg(), TR::InstOpCode::li, callNode, tempReg1, 0);
782
generateMemSrc1Instruction(cg(), TR::InstOpCode::Op_st, callNode, TR::MemoryReference::createWithDisplacement(cg(), metaReg, (int32_t)offsetof(struct J9VMThread, inNative), TR::Compiler->om.sizeofReferenceAddress()), tempReg1);
783
#if !defined(J9VM_INTERP_ATOMIC_FREE_JNI_USES_FLUSH)
784
generateInstruction(cg(), TR::InstOpCode::sync, callNode);
785
#endif /* !J9VM_INTERP_ATOMIC_FREE_JNI_USES_FLUSH */
786
generateTrg1MemInstruction(cg(), TR::InstOpCode::Op_load, callNode, tempReg1, TR::MemoryReference::createWithDisplacement(cg(), metaReg, fej9->thisThreadGetPublicFlagsOffset(), TR::Compiler->om.sizeofReferenceAddress()));
787
TR_ASSERT_FATAL(J9_PUBLIC_FLAGS_VM_ACCESS >= LOWER_IMMED && J9_PUBLIC_FLAGS_VM_ACCESS <= UPPER_IMMED, "VM access bit must be immediate");
788
generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::Op_cmpli, callNode, cr0Reg, tempReg1, J9_PUBLIC_FLAGS_VM_ACCESS);
789
790
TR::SymbolReference *jitAcquireVMAccessSymRef = comp()->getSymRefTab()->findOrCreateAcquireVMAccessSymbolRef(comp()->getJittedMethodSymbol());
791
TR::LabelSymbol *acquireVMAccessSnippetLabel = cg()->lookUpSnippet(TR::Snippet::IsHelperCall, jitAcquireVMAccessSymRef);
792
if (!acquireVMAccessSnippetLabel)
793
{
794
acquireVMAccessSnippetLabel = generateLabelSymbol(cg());
795
cg()->addSnippet(new (trHeapMemory()) TR::PPCHelperCallSnippet(cg(), callNode, acquireVMAccessSnippetLabel, jitAcquireVMAccessSymRef));
796
}
797
798
if (comp()->target().cpu.isAtLeast(OMR_PROCESSOR_PPC_GP))
799
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bnel, PPCOpProp_BranchUnlikely, callNode, acquireVMAccessSnippetLabel, cr0Reg);
800
else
801
generateConditionalBranchInstruction(cg(), TR::InstOpCode::bnel, callNode, acquireVMAccessSnippetLabel, cr0Reg);
802
}
803
#endif /* J9VM_INTERP_ATOMIC_FREE_JNI */
804
805
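// Marshal the call's arguments for the system (C) linkage in two passes: first count how many must
// be passed in memory, then evaluate each child and bind it to an argument register or a stack slot.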
int32_t J9::Power::JNILinkage::buildJNIArgs(TR::Node *callNode,
806
TR::RegisterDependencyConditions *dependencies,
807
const TR::PPCLinkageProperties &properties,
808
bool isFastJNI,
809
bool passReceiver,
810
bool implicitEnvArg)
811
812
{
813
//TODO: Temporary clone of PPCSystemLinkage::buildArgs. Both PPCSystemLinkage::buildArgs and
814
//this buildJNIArgs will be refactored for commonality.
815
816
TR::PPCMemoryArgument *pushToMemory = NULL;
817
TR::Register *tempRegister;
818
int32_t argIndex = 0, memArgs = 0, i;
819
int32_t argSize = 0;
820
uint32_t numIntegerArgs = 0;
821
uint32_t numFloatArgs = 0;
822
823
TR::Node *child;
824
void *smark;
825
TR::DataType resType = callNode->getType();
826
TR_Array<TR::Register *> &tempLongRegisters = cg()->getTransientLongRegisters();
827
TR::Symbol * callSymbol = callNode->getSymbolReference()->getSymbol();
828
829
uint32_t firstArgumentChild = callNode->getFirstArgumentIndex();
830
831
bool aix_style_linkage = (comp()->target().isAIX() || (comp()->target().is64Bit() && comp()->target().isLinux()));
832
833
if (isFastJNI) // Account for extra parameters (env and obj)
834
{
835
if (implicitEnvArg)
836
numIntegerArgs += 1;
837
if (!passReceiver)
838
{
839
// Evaluate as usual if necessary, but don't actually pass it to the callee
840
TR::Node *firstArgChild = callNode->getChild(firstArgumentChild);
841
if (firstArgChild->getReferenceCount() > 1)
842
{
843
switch (firstArgChild->getDataType())
844
{
845
case TR::Int32:
846
pushIntegerWordArg(firstArgChild);
847
break;
848
case TR::Int64:
849
pushLongArg(firstArgChild);
850
break;
851
case TR::Address:
852
pushAddressArg(firstArgChild);
853
break;
854
default:
855
TR_ASSERT( false, "Unexpected first child type");
856
}
857
}
858
else
859
cg()->recursivelyDecReferenceCount(firstArgChild);
860
firstArgumentChild += 1;
861
}
862
}
863
864
/* Step 1 - figure out how many arguments are going to be spilled to memory i.e. not in registers */
865
for (i = firstArgumentChild; i < callNode->getNumChildren(); i++)
866
{
867
child = callNode->getChild(i);
868
switch (child->getDataType())
869
{
870
case TR::Int8:
871
case TR::Int16:
872
case TR::Int32:
873
case TR::Address:
874
if (numIntegerArgs >= properties.getNumIntArgRegs())
875
memArgs++;
876
numIntegerArgs++;
877
break;
878
case TR::Int64:
879
if (comp()->target().is64Bit())
880
{
881
if (numIntegerArgs >= properties.getNumIntArgRegs())
882
memArgs++;
883
numIntegerArgs++;
884
}
885
else
886
{
887
if (aix_style_linkage)
888
{
889
if (numIntegerArgs == properties.getNumIntArgRegs()-1)
890
memArgs++;
891
else if (numIntegerArgs > properties.getNumIntArgRegs()-1)
892
memArgs += 2;
893
}
894
else
895
{
896
if (numIntegerArgs & 1)
897
numIntegerArgs++;
898
if (numIntegerArgs >= properties.getNumIntArgRegs())
899
memArgs += 2;
900
}
901
numIntegerArgs += 2;
902
}
903
break;
904
case TR::Float:
905
if (aix_style_linkage)
906
{
907
if (numIntegerArgs >= properties.getNumIntArgRegs())
908
memArgs++;
909
numIntegerArgs++;
910
}
911
else
912
{
913
if (numFloatArgs >= properties.getNumFloatArgRegs())
914
memArgs++;
915
}
916
numFloatArgs++;
917
break;
918
case TR::Double:
919
if (aix_style_linkage)
920
{
921
if (comp()->target().is64Bit())
922
{
923
if (numIntegerArgs >= properties.getNumIntArgRegs())
924
memArgs++;
925
numIntegerArgs++;
926
}
927
else
928
{
929
if (numIntegerArgs >= properties.getNumIntArgRegs()-1)
930
memArgs++;
931
numIntegerArgs += 2;
932
}
933
}
934
else
935
{
936
if (numFloatArgs >= properties.getNumFloatArgRegs())
937
memArgs++;
938
}
939
numFloatArgs++;
940
break;
941
case TR::VectorDouble:
942
TR_ASSERT(false, "JNI dispatch: VectorDouble argument not expected");
943
break;
944
case TR::Aggregate:
945
{
946
size_t size = child->getSymbolReference()->getSymbol()->getSize();
947
size = (size + sizeof(uintptr_t) - 1) & (~(sizeof(uintptr_t) - 1)); // round up the size
948
size_t slots = size / sizeof(uintptr_t);
949
950
if (numIntegerArgs >= properties.getNumIntArgRegs())
951
memArgs += slots;
952
else
953
memArgs += (properties.getNumIntArgRegs() - numIntegerArgs) > slots ? 0: slots - (properties.getNumIntArgRegs() - numIntegerArgs);
954
numIntegerArgs += slots;
955
}
956
break;
957
default:
958
TR_ASSERT(false, "Argument type %s is not supported\n", child->getDataType().toString());
959
}
960
}
961
962
// From here down, any new stack allocations will expire when the function returns
963
TR::StackMemoryRegion stackMemoryRegion(*trMemory());
964
965
/* End result of Step 1 - determined number of memory arguments! */
966
if (memArgs > 0)
967
{
968
pushToMemory = new (trStackMemory()) TR::PPCMemoryArgument[memArgs];
969
}
970
971
numIntegerArgs = 0;
972
numFloatArgs = 0;
973
974
if (isFastJNI && implicitEnvArg) // Account for extra parameter (env)
975
{
976
// first argument is JNIenv
977
numIntegerArgs += 1;
978
if (aix_style_linkage)
979
argSize += TR::Compiler->om.sizeofReferenceAddress();
980
}
981
982
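// Step 2: evaluate each argument and either attach it to its argument register through the
// dependency conditions or store it to its outgoing stack slot; argSize tracks the slot offset.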
for (i = firstArgumentChild; i < callNode->getNumChildren(); i++)
983
{
984
TR::MemoryReference *mref=NULL;
985
TR::Register *argRegister;
986
bool checkSplit = true;
987
988
child = callNode->getChild(i);
989
TR::DataType childType = child->getDataType();
990
991
switch (childType)
992
{
993
case TR::Int8:
994
case TR::Int16:
995
case TR::Int32:
996
case TR::Address: // have to do something for GC maps here
997
if (isFastJNI && childType==TR::Address)
998
{
999
argRegister = pushJNIReferenceArg(child);
1000
checkSplit = false;
1001
}
1002
else
1003
if (childType == TR::Address && !isFastJNI)
1004
argRegister = pushAddressArg(child);
1005
else
1006
argRegister = pushIntegerWordArg(child);
1007
1008
if (numIntegerArgs < properties.getNumIntArgRegs())
1009
{
1010
// Sign extend non-64bit Integers on LinuxPPC64 as required by the ABI
1011
// The AIX64 ABI also specifies this behaviour but we observed otherwise
1012
// We can do this blindly in Java since there are no unsigned types
1013
// WCode will have to do something better!
1014
if (isFastJNI &&
1015
(comp()->target().isLinux() && comp()->target().is64Bit()) &&
1016
childType != TR::Address)
1017
generateTrg1Src1Instruction(cg(), TR::InstOpCode::extsw, callNode, argRegister, argRegister);
1018
1019
if (checkSplit && !cg()->canClobberNodesRegister(child, 0))
1020
{
1021
if (argRegister->containsCollectedReference())
1022
tempRegister = cg()->allocateCollectedReferenceRegister();
1023
else
1024
tempRegister = cg()->allocateRegister();
1025
generateTrg1Src1Instruction(cg(), TR::InstOpCode::mr, callNode, tempRegister, argRegister);
1026
argRegister = tempRegister;
1027
}
1028
if (numIntegerArgs == 0 &&
1029
(resType.isAddress() || resType.isInt32() || resType.isInt64()))
1030
{
1031
TR::Register *resultReg;
1032
if (resType.isAddress())
1033
resultReg = cg()->allocateCollectedReferenceRegister();
1034
else
1035
resultReg = cg()->allocateRegister();
1036
dependencies->addPreCondition(argRegister, TR::RealRegister::gr3);
1037
dependencies->addPostCondition(resultReg, TR::RealRegister::gr3);
1038
}
1039
else if (comp()->target().is32Bit() && numIntegerArgs == 1 && resType.isInt64())
1040
{
1041
TR::Register *resultReg = cg()->allocateRegister();
1042
dependencies->addPreCondition(argRegister, TR::RealRegister::gr4);
1043
dependencies->addPostCondition(resultReg, TR::RealRegister::gr4);
1044
}
1045
else
1046
{
1047
TR::addDependency(dependencies, argRegister, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
1048
}
1049
}
1050
else // numIntegerArgs >= properties.getNumIntArgRegs()
1051
{
1052
mref = getOutgoingArgumentMemRef(argSize, argRegister,TR::InstOpCode::Op_st, pushToMemory[argIndex++], TR::Compiler->om.sizeofReferenceAddress(), properties);
1053
//printf("integral or address memory arg, offset = %d\n", argSize);
1054
if (!aix_style_linkage)
1055
argSize += TR::Compiler->om.sizeofReferenceAddress();
1056
}
1057
numIntegerArgs++;
1058
if (aix_style_linkage)
1059
argSize += TR::Compiler->om.sizeofReferenceAddress();
1060
break;
1061
case TR::Int64:
1062
argRegister = pushLongArg(child);
1063
if (!aix_style_linkage)
1064
{
1065
if (numIntegerArgs & 1)
1066
{
1067
if (numIntegerArgs < properties.getNumIntArgRegs())
1068
TR::addDependency(dependencies, NULL, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
1069
numIntegerArgs++;
1070
}
1071
}
1072
if (numIntegerArgs < properties.getNumIntArgRegs())
1073
{
1074
if (!cg()->canClobberNodesRegister(child, 0))
1075
{
1076
if (comp()->target().is64Bit())
1077
{
1078
tempRegister = cg()->allocateRegister();
1079
generateTrg1Src1Instruction(cg(), TR::InstOpCode::mr, callNode, tempRegister, argRegister);
1080
argRegister = tempRegister;
1081
}
1082
else
1083
{
1084
tempRegister = cg()->allocateRegister();
1085
generateTrg1Src1Instruction(cg(), TR::InstOpCode::mr, callNode, tempRegister, argRegister->getRegisterPair()->getHighOrder());
1086
argRegister = cg()->allocateRegisterPair(argRegister->getRegisterPair()->getLowOrder(), tempRegister);
1087
tempLongRegisters.add(argRegister);
1088
}
1089
}
1090
if (numIntegerArgs == 0 &&
1091
(resType.isAddress() || resType.isInt32() || resType.isInt64()))
1092
{
1093
TR::Register *resultReg;
1094
if (resType.isAddress())
1095
resultReg = cg()->allocateCollectedReferenceRegister();
1096
else
1097
resultReg = cg()->allocateRegister();
1098
if (comp()->target().is64Bit())
1099
dependencies->addPreCondition(argRegister, TR::RealRegister::gr3);
1100
else
1101
dependencies->addPreCondition(argRegister->getRegisterPair()->getHighOrder(), TR::RealRegister::gr3);
1102
dependencies->addPostCondition(resultReg, TR::RealRegister::gr3);
1103
}
1104
else if (comp()->target().is32Bit() && numIntegerArgs == 1 && resType.isInt64())
1105
{
1106
TR::Register *resultReg = cg()->allocateRegister();
1107
dependencies->addPreCondition(argRegister, TR::RealRegister::gr4);
1108
dependencies->addPostCondition(resultReg, TR::RealRegister::gr4);
1109
}
1110
else
1111
{
1112
if (comp()->target().is64Bit())
1113
TR::addDependency(dependencies, argRegister, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
1114
else
1115
TR::addDependency(dependencies, argRegister->getRegisterPair()->getHighOrder(), properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
1116
}
1117
if (comp()->target().is32Bit())
1118
{
1119
if (numIntegerArgs < properties.getNumIntArgRegs()-1)
1120
{
1121
if (!cg()->canClobberNodesRegister(child, 0))
1122
{
1123
TR::Register *over_lowReg = argRegister->getRegisterPair()->getLowOrder();
1124
tempRegister = cg()->allocateRegister();
1125
generateTrg1Src1Instruction(cg(), TR::InstOpCode::mr, callNode, tempRegister, over_lowReg);
1126
argRegister->getRegisterPair()->setLowOrder(tempRegister, cg());
1127
}
1128
if (numIntegerArgs == 0 && resType.isInt64())
1129
{
1130
TR::Register *resultReg = cg()->allocateRegister();
1131
dependencies->addPreCondition(argRegister->getRegisterPair()->getLowOrder(), TR::RealRegister::gr4);
1132
dependencies->addPostCondition(resultReg, TR::RealRegister::gr4);
1133
}
1134
else
1135
TR::addDependency(dependencies, argRegister->getRegisterPair()->getLowOrder(), properties.getIntegerArgumentRegister(numIntegerArgs+1), TR_GPR, cg());
1136
}
1137
else // numIntegerArgs == properties.getNumIntArgRegs()-1
1138
{
1139
mref = getOutgoingArgumentMemRef(argSize+4, argRegister->getRegisterPair()->getLowOrder(), TR::InstOpCode::stw, pushToMemory[argIndex++], 4, properties);
1140
}
1141
numIntegerArgs ++;
1142
}
1143
}
1144
else // numIntegerArgs >= properties.getNumIntArgRegs()
1145
{
1146
if (comp()->target().is64Bit())
1147
{
1148
mref = getOutgoingArgumentMemRef(argSize, argRegister, TR::InstOpCode::std, pushToMemory[argIndex++], TR::Compiler->om.sizeofReferenceAddress(), properties);
1149
}
1150
else
1151
{
1152
if (!aix_style_linkage)
1153
argSize = (argSize + 4) & (~7);
1154
mref = getOutgoingArgumentMemRef(argSize, argRegister->getRegisterPair()->getHighOrder(), TR::InstOpCode::stw, pushToMemory[argIndex++], 4, properties);
1155
mref = getOutgoingArgumentMemRef(argSize+4, argRegister->getRegisterPair()->getLowOrder(), TR::InstOpCode::stw, pushToMemory[argIndex++], 4, properties);
1156
numIntegerArgs ++;
1157
if (!aix_style_linkage)
1158
argSize += 8;
1159
}
1160
}
1161
numIntegerArgs ++;
1162
if (aix_style_linkage)
1163
argSize += 8;
1164
break;
1165
1166
         case TR::Float:
            argRegister = pushFloatArg(child);
            for (int r = 0; r < ((childType == TR::Float) ? 1: 2); r++)
               {
               TR::Register * argReg;
               if (childType == TR::Float)
                  argReg = argRegister;
               else
                  argReg = (r == 0) ? argRegister->getHighOrder() : argRegister->getLowOrder();

               if (numFloatArgs < properties.getNumFloatArgRegs())
                  {
                  if (!cg()->canClobberNodesRegister(child, 0))
                     {
                     tempRegister = cg()->allocateRegister(TR_FPR);
                     generateTrg1Src1Instruction(cg(), TR::InstOpCode::fmr, callNode, tempRegister, argReg);
                     argReg = tempRegister;
                     }
                  if (numFloatArgs == 0 && resType.isFloatingPoint())
                     {
                     TR::Register *resultReg;
                     if (resType.getDataType() == TR::Float)
                        resultReg = cg()->allocateSinglePrecisionRegister();
                     else
                        resultReg = cg()->allocateRegister(TR_FPR);

                     dependencies->addPreCondition(argReg, (r == 0) ? TR::RealRegister::fp1 : TR::RealRegister::fp2);
                     dependencies->addPostCondition(resultReg, (r == 0) ? TR::RealRegister::fp1 : TR::RealRegister::fp2);
                     }
                  else
                     TR::addDependency(dependencies, argReg, properties.getFloatArgumentRegister(numFloatArgs), TR_FPR, cg());
                  }
               else if (!aix_style_linkage)
                  // numFloatArgs >= properties.getNumFloatArgRegs()
                  {
                  mref = getOutgoingArgumentMemRef(argSize, argReg, TR::InstOpCode::stfs, pushToMemory[argIndex++], 4, properties);
                  argSize += 4;
                  }

               if (aix_style_linkage)
                  {
                  if (numIntegerArgs < properties.getNumIntArgRegs())
                     {
                     if (numIntegerArgs==0 && resType.isAddress())
                        {
                        TR::Register *aReg = cg()->allocateRegister();
                        TR::Register *bReg = cg()->allocateCollectedReferenceRegister();
                        dependencies->addPreCondition(aReg, TR::RealRegister::gr3);
                        dependencies->addPostCondition(bReg, TR::RealRegister::gr3);
                        }
                     else
                        TR::addDependency(dependencies, NULL, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
                     }
                  else // numIntegerArgs >= properties.getNumIntArgRegs()
                     {
                     if (comp()->target().is64Bit() && comp()->target().isLinux())
                        {
                        mref = getOutgoingArgumentMemRef(argSize+4, argReg, TR::InstOpCode::stfs, pushToMemory[argIndex++], 4, properties);
                        }
                     else
                        {
                        mref = getOutgoingArgumentMemRef(argSize, argReg, TR::InstOpCode::stfs, pushToMemory[argIndex++], 4, properties);
                        }
                     }

                  numIntegerArgs++;
                  }
               numFloatArgs++;
               if (aix_style_linkage)
                  argSize += TR::Compiler->om.sizeofReferenceAddress();

               } // for loop
            break;

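         // Double arguments mirror the float path. Note that on AIX-style linkages a double
         // consumes one integer-argument slot on 64-bit targets and two on 32-bit targets,
         // and the overflow path stores the full 8 bytes with stfd.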
         case TR::Double:
            argRegister = pushDoubleArg(child);
            for (int r = 0; r < ((childType == TR::Double) ? 1: 2); r++)
               {
               TR::Register * argReg;
               if (childType == TR::Double)
                  argReg = argRegister;
               else
                  argReg = (r == 0) ? argRegister->getHighOrder() : argRegister->getLowOrder();

               if (numFloatArgs < properties.getNumFloatArgRegs())
                  {
                  if (!cg()->canClobberNodesRegister(child, 0))
                     {
                     tempRegister = cg()->allocateRegister(TR_FPR);
                     generateTrg1Src1Instruction(cg(), TR::InstOpCode::fmr, callNode, tempRegister, argReg);
                     argReg = tempRegister;
                     }
                  if (numFloatArgs == 0 && resType.isFloatingPoint())
                     {
                     TR::Register *resultReg;
                     if (resType.getDataType() == TR::Float)
                        resultReg = cg()->allocateSinglePrecisionRegister();
                     else
                        resultReg = cg()->allocateRegister(TR_FPR);
                     dependencies->addPreCondition(argReg, (r==0) ? TR::RealRegister::fp1 : TR::RealRegister::fp2);
                     dependencies->addPostCondition(resultReg, (r==0) ? TR::RealRegister::fp1 : TR::RealRegister::fp2);
                     }
                  else
                     TR::addDependency(dependencies, argReg, properties.getFloatArgumentRegister(numFloatArgs), TR_FPR, cg());
                  }
               else if (!aix_style_linkage)
                  // numFloatArgs >= properties.getNumFloatArgRegs()
                  {
                  argSize = (argSize + 4) & (~7);
                  mref = getOutgoingArgumentMemRef(argSize, argReg, TR::InstOpCode::stfd, pushToMemory[argIndex++], 8, properties);
                  argSize += 8;
                  }

               if (aix_style_linkage)
                  {
                  if (numIntegerArgs < properties.getNumIntArgRegs())
                     {
                     TR::MemoryReference *tempMR;

                     if (numIntegerArgs==0 && resType.isAddress())
                        {
                        TR::Register *aReg = cg()->allocateRegister();
                        TR::Register *bReg = cg()->allocateCollectedReferenceRegister();
                        dependencies->addPreCondition(aReg, TR::RealRegister::gr3);
                        dependencies->addPostCondition(bReg, TR::RealRegister::gr3);
                        }
                     else
                        TR::addDependency(dependencies, NULL, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());

                     if (comp()->target().is32Bit())
                        {
                        if ((numIntegerArgs+1) < properties.getNumIntArgRegs())
                           TR::addDependency(dependencies, NULL, properties.getIntegerArgumentRegister(numIntegerArgs+1), TR_GPR, cg());
                        else
                           {
                           mref = getOutgoingArgumentMemRef(argSize, argReg, TR::InstOpCode::stfd, pushToMemory[argIndex++], 8, properties);
                           }
                        }
                     }
                  else // numIntegerArgs >= properties.getNumIntArgRegs()
                     {
                     mref = getOutgoingArgumentMemRef(argSize, argReg, TR::InstOpCode::stfd, pushToMemory[argIndex++], 8, properties);
                     }

                  numIntegerArgs += comp()->target().is64Bit()?1:2;
                  }
               numFloatArgs++;
               if (aix_style_linkage)
                  argSize += 8;

               } // end of for loop
            break;
         case TR::VectorDouble:
            TR_ASSERT(false, "JNI dispatch: VectorDouble argument not expected");
            break;
         }
      }

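   // Kill any integer argument registers that were not claimed by real arguments so the
   // register assigner treats them as clobbered across the call; the first integer argument
   // register (the return register) is special-cased so its post-condition can hold a
   // collected reference when the call returns an object.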
   while (numIntegerArgs < properties.getNumIntArgRegs())
      {
      if (numIntegerArgs == 0 && resType.isAddress())
         {
         dependencies->addPreCondition(cg()->allocateRegister(), properties.getIntegerArgumentRegister(0));
         dependencies->addPostCondition(cg()->allocateCollectedReferenceRegister(), properties.getIntegerArgumentRegister(0));
         }
      else
         {
         TR::addDependency(dependencies, NULL, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
         }
      numIntegerArgs++;
      }

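   // A register file is treated as live when live-register tracking is unavailable for it or
   // when it still has live virtual registers; these flags decide below how aggressively the
   // vector/VSX and FPR volatiles must be killed across the call.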
   TR_LiveRegisters *liveRegs;
   bool liveVSXScalar, liveVSXVector, liveVMX;

   liveRegs = cg()->getLiveRegisters(TR_VSX_SCALAR);
   liveVSXScalar = (!liveRegs || liveRegs->getNumberOfLiveRegisters() > 0);
   liveRegs = cg()->getLiveRegisters(TR_VSX_VECTOR);
   liveVSXVector = (!liveRegs || liveRegs->getNumberOfLiveRegisters() > 0);
   liveRegs = cg()->getLiveRegisters(TR_VRF);
   liveVMX = (!liveRegs || liveRegs->getNumberOfLiveRegisters() > 0);

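   // Kill the remaining special volatile registers (fp0, gr0, gr11, gr12 and the volatile
   // condition registers). When this is not a fast JNI call on an AIX-style linkage, gr2
   // (the TOC/environment register) is clobbered as well.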
   TR::addDependency(dependencies, NULL, TR::RealRegister::fp0, TR_FPR, cg());
   TR::addDependency(dependencies, NULL, TR::RealRegister::gr0, TR_GPR, cg());
   TR::addDependency(dependencies, NULL, TR::RealRegister::gr11, TR_GPR, cg());
   TR::addDependency(dependencies, NULL, TR::RealRegister::gr12, TR_GPR, cg());
   TR::addDependency(dependencies, NULL, TR::RealRegister::cr0, TR_CCR, cg());
   TR::addDependency(dependencies, NULL, TR::RealRegister::cr1, TR_CCR, cg());
   TR::addDependency(dependencies, NULL, TR::RealRegister::cr5, TR_CCR, cg());
   TR::addDependency(dependencies, NULL, TR::RealRegister::cr6, TR_CCR, cg());
   TR::addDependency(dependencies, NULL, TR::RealRegister::cr7, TR_CCR, cg());
   if (!isFastJNI && aix_style_linkage)
      TR::addDependency(dependencies, NULL, TR::RealRegister::gr2, TR_GPR, cg());

   int32_t floatRegsUsed = (numFloatArgs>properties.getNumFloatArgRegs())?properties.getNumFloatArgRegs():numFloatArgs;

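   // If any vector/VSX registers may be live, kill every VSR above the FPR range that the
   // callee is not required to preserve, and all of them when the call can re-enter Java
   // (!isFastJNI).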
   if (liveVMX || liveVSXScalar || liveVSXVector)
      {
      for (i=(TR::RealRegister::RegNum)((uint32_t)TR::RealRegister::LastFPR+1); i<=TR::RealRegister::LastVSR; i++)
         {
         // isFastJNI implies no call back into Java, so callee-preserved registers really are preserved
         if (!properties.getPreserved((TR::RealRegister::RegNum)i) || !isFastJNI)
            {
            TR::addDependency(dependencies, NULL, (TR::RealRegister::RegNum)i, TR_VSX_SCALAR, cg());
            }

         }
      }

   for (i=(TR::RealRegister::RegNum)((uint32_t)TR::RealRegister::fp0+floatRegsUsed+1); i<=TR::RealRegister::LastFPR; i++)
      {
      // isFastJNI implies no call back into Java, so callee-preserved registers really are preserved
      // TODO: liveVSXVector is overkill for assembler mode; really only vectors are required
      if (!properties.getPreserved((TR::RealRegister::RegNum)i) || liveVSXVector ||
          (!isFastJNI))
         {
         TR::addDependency(dependencies, NULL, (TR::RealRegister::RegNum)i, TR_FPR, cg());
         }
      }

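   // All register arguments now have dependencies; emit the deferred stores for arguments
   // that are passed in the outgoing argument area and release their source registers.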
   if (memArgs > 0)
      {
      for (argIndex=0; argIndex<memArgs; argIndex++)
         {
         TR::Register *aReg = pushToMemory[argIndex].argRegister;
         generateMemSrc1Instruction(cg(), pushToMemory[argIndex].opCode, callNode, pushToMemory[argIndex].argMemory, aReg);
         cg()->stopUsingRegister(aReg);
         }
      }

   return argSize;
   }

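// Evaluate a JNI reference argument into the register it will be passed in.
//
// Object references are handed to the native function indirectly, as the address of a stack,
// parameter or static slot holding the reference, except that a null reference must be passed
// as a plain NULL pointer rather than the address of a slot containing NULL. When null-ness
// cannot be proven at compile time, the code below loads the referent, compares it against
// zero and zeroes the outgoing register on the null path, roughly:
//
//    load  whatReg, 0(addrReg)
//    cmpi  condReg, whatReg, 0
//    bne   condReg, nonNull
//    li    pushRegister, 0
// nonNull: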
TR::Register *J9::Power::JNILinkage::pushJNIReferenceArg(TR::Node *child)
   {
   TR::Register *pushRegister;
   bool checkSplit = true;

   if (child->getOpCodeValue() == TR::loadaddr)
      {
      TR::SymbolReference * symRef = child->getSymbolReference();
      TR::StaticSymbol *sym = symRef->getSymbol()->getStaticSymbol();
      if (sym)
         {
         if (sym->isAddressOfClassObject())
            {
            pushRegister = pushAddressArg(child);
            }
         else
            {
            TR::Register *condReg = cg()->allocateRegister(TR_CCR);
            TR::Register *addrReg = cg()->evaluate(child);
            TR::MemoryReference *tmpMemRef = TR::MemoryReference::createWithDisplacement(cg(), addrReg, (int32_t)0, TR::Compiler->om.sizeofReferenceAddress());
            TR::Register *whatReg = cg()->allocateCollectedReferenceRegister();
            TR::LabelSymbol *nonNullLabel = generateLabelSymbol(cg());

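            // The argument is the address of a static reference slot: load the referent and,
            // if it turns out to be null, pass NULL instead of the slot's address.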
            checkSplit = false;
            generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, child, whatReg, tmpMemRef);
            if (!cg()->canClobberNodesRegister(child))
               {
               // Since this is a static variable, it is non-collectable.
               TR::Register *tempRegister = cg()->allocateRegister();
               generateTrg1Src1Instruction(cg(), TR::InstOpCode::mr, child, tempRegister, addrReg);
               pushRegister = tempRegister;
               }
            else
               pushRegister = addrReg;
            generateTrg1Src1ImmInstruction(cg(),TR::InstOpCode::Op_cmpi, child, condReg, whatReg, 0);
            generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, child, nonNullLabel, condReg);
            generateTrg1ImmInstruction(cg(), TR::InstOpCode::li, child, pushRegister, 0);

            TR::RegisterDependencyConditions *conditions = new (trHeapMemory()) TR::RegisterDependencyConditions(3, 3, trMemory());
            TR::addDependency(conditions, pushRegister, TR::RealRegister::NoReg, TR_GPR, cg());
            TR::addDependency(conditions, whatReg, TR::RealRegister::NoReg, TR_GPR, cg());
            TR::addDependency(conditions, condReg, TR::RealRegister::NoReg, TR_CCR, cg());

            generateDepLabelInstruction(cg(), TR::InstOpCode::label, child, nonNullLabel, conditions);
            conditions->stopUsingDepRegs(cg(), pushRegister);
            cg()->decReferenceCount(child);
            }
         }
      else // must be loadaddr of parm or local
         {
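         // loadaddr of a parm or local: use the node's compile-time null-ness information
         // when available, otherwise fall back to the same runtime null check as the static
         // case above.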
         if (child->pointsToNonNull())
            {
            pushRegister = pushAddressArg(child);
            }
         else if (child->pointsToNull())
            {
            checkSplit = false;
            pushRegister = cg()->allocateRegister();
            loadConstant(cg(), child, 0, pushRegister);
            cg()->decReferenceCount(child);
            }
         else
            {
            TR::Register *addrReg = cg()->evaluate(child);
            TR::Register *condReg = cg()->allocateRegister(TR_CCR);
            TR::Register *whatReg = cg()->allocateCollectedReferenceRegister();
            TR::LabelSymbol *nonNullLabel = generateLabelSymbol(cg());

            checkSplit = false;
            generateTrg1MemInstruction(cg(),TR::InstOpCode::Op_load, child, whatReg, TR::MemoryReference::createWithDisplacement(cg(), addrReg, (int32_t)0, TR::Compiler->om.sizeofReferenceAddress()));
            if (!cg()->canClobberNodesRegister(child))
               {
               // Since this points at a parm or local location, it is non-collectable.
               TR::Register *tempReg = cg()->allocateRegister();
               generateTrg1Src1Instruction(cg(), TR::InstOpCode::mr, child, tempReg, addrReg);
               pushRegister = tempReg;
               }
            else
               pushRegister = addrReg;
            generateTrg1Src1ImmInstruction(cg(),TR::InstOpCode::Op_cmpi, child, condReg, whatReg, 0);
            generateConditionalBranchInstruction(cg(), TR::InstOpCode::bne, child, nonNullLabel, condReg);
            generateTrg1ImmInstruction(cg(), TR::InstOpCode::li, child, pushRegister, 0);

            TR::RegisterDependencyConditions *conditions = new (trHeapMemory()) TR::RegisterDependencyConditions(3, 3, trMemory());
            TR::addDependency(conditions, pushRegister, TR::RealRegister::NoReg, TR_GPR, cg());
            TR::addDependency(conditions, whatReg, TR::RealRegister::NoReg, TR_GPR, cg());
            TR::addDependency(conditions, condReg, TR::RealRegister::NoReg, TR_CCR, cg());

            generateDepLabelInstruction(cg(), TR::InstOpCode::label, child, nonNullLabel, conditions);
            conditions->stopUsingDepRegs(cg(), pushRegister);
            cg()->decReferenceCount(child);
            }
         }
      }
   else
      {
      pushRegister = pushAddressArg(child);
      }

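   // If the evaluated register cannot be clobbered (the node is still referenced elsewhere),
   // copy the value into a fresh register, preserving its collected-reference property,
   // before returning it as the outgoing argument register.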
   if (checkSplit && !cg()->canClobberNodesRegister(child, 0))
      {
      TR::Register *tempReg = pushRegister->containsCollectedReference()?
                              cg()->allocateCollectedReferenceRegister():cg()->allocateRegister();
      generateTrg1Src1Instruction(cg(), TR::InstOpCode::mr, child, tempReg, pushRegister);
      pushRegister = tempReg;
      }
   return pushRegister;
   }
