Path: blob/main/contrib/llvm-project/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
//===-- NVPTXLowerArgs.cpp - Lower arguments ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
// Arguments to kernel and device functions are passed via param space,
// which imposes certain restrictions:
// http://docs.nvidia.com/cuda/parallel-thread-execution/#state-spaces
//
// Kernel parameters are read-only and accessible only via ld.param
// instruction, directly or via a pointer.
//
// Device function parameters are directly accessible via
// ld.param/st.param, but taking the address of one returns a pointer
// to a copy created in local space which *can't* be used with
// ld.param/st.param.
//
// Copying a byval struct into local memory in IR allows us to enforce
// the param space restrictions, gives the rest of IR a pointer w/o
// param space restrictions, and gives us an opportunity to eliminate
// the copy.
//
// Pointer arguments to kernel functions need more work to be lowered:
//
// 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
//    global address space. This allows later optimizations to emit
//    ld.global.*/st.global.* for accessing these pointer arguments. For
//    example,
//
//    define void @foo(float* %input) {
//      %v = load float, float* %input, align 4
//      ...
//    }
//
//    becomes
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %input3 = addrspacecast float addrspace(1)* %input2 to float*
//      %v = load float, float* %input3, align 4
//      ...
//    }
//
//    Later, NVPTXInferAddressSpaces will optimize it to
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %v = load float, float addrspace(1)* %input2, align 4
//      ...
//    }
//
// 2. Convert byval kernel parameters to pointers in the param address space
//    (so that NVPTX emits ld/st.param). Convert pointers *within* a byval
//    kernel parameter to pointers in the global address space. This allows
//    NVPTX to emit ld/st.global.
//
//    struct S {
//      int *x;
//      int *y;
//    };
//    __global__ void foo(S s) {
//      int *b = s.y;
//      // use b
//    }
//
//    "b" points to the global address space. At the IR level,
//
//    define void @foo(ptr byval %input) {
//      %b_ptr = getelementptr {ptr, ptr}, ptr %input, i64 0, i32 1
//      %b = load ptr, ptr %b_ptr
//      ; use %b
//    }
//
//    becomes
//
//    define void @foo(ptr byval %input) {
//      %b_param = addrspacecast ptr %input to ptr addrspace(101)
//      %b_ptr = getelementptr {ptr, ptr}, ptr addrspace(101) %b_param, i64 0, i32 1
//      %b = load ptr, ptr addrspace(101) %b_ptr
//      %b_global = addrspacecast ptr %b to ptr addrspace(1)
//      ; use %b_global
//    }
//
// Create a local copy of kernel byval parameters used in a way that *might*
// mutate the parameter, by storing it in an alloca.
Mutations to "grid_constant" parameters89// are undefined behaviour, and don't require local copies.90//91// define void @foo(ptr byval(%struct.s) align 4 %input) {92// store i32 42, ptr %input93// ret void94// }95//96// becomes97//98// define void @foo(ptr byval(%struct.s) align 4 %input) #1 {99// %input1 = alloca %struct.s, align 4100// %input2 = addrspacecast ptr %input to ptr addrspace(101)101// %input3 = load %struct.s, ptr addrspace(101) %input2, align 4102// store %struct.s %input3, ptr %input1, align 4103// store i32 42, ptr %input1, align 4104// ret void105// }106//107// If %input were passed to a device function, or written to memory,108// conservatively assume that %input gets mutated, and create a local copy.109//110// Convert param pointers to grid_constant byval kernel parameters that are111// passed into calls (device functions, intrinsics, inline asm), or otherwise112// "escape" (into stores/ptrtoints) to the generic address space, using the113// `nvvm.ptr.param.to.gen` intrinsic, so that NVPTX emits cvta.param114// (available for sm70+)115//116// define void @foo(ptr byval(%struct.s) %input) {117// ; %input is a grid_constant118// %call = call i32 @escape(ptr %input)119// ret void120// }121//122// becomes123//124// define void @foo(ptr byval(%struct.s) %input) {125// %input1 = addrspacecast ptr %input to ptr addrspace(101)126// ; the following intrinsic converts pointer to generic. We don't use an addrspacecast127// ; to prevent generic -> param -> generic from getting cancelled out128// %input1.gen = call ptr @llvm.nvvm.ptr.param.to.gen.p0.p101(ptr addrspace(101) %input1)129// %call = call i32 @escape(ptr %input1.gen)130// ret void131// }132//133// TODO: merge this pass with NVPTXInferAddressSpaces so that other passes don't134// cancel the addrspacecast pair this pass emits.135//===----------------------------------------------------------------------===//136137#include "MCTargetDesc/NVPTXBaseInfo.h"138#include "NVPTX.h"139#include "NVPTXTargetMachine.h"140#include "NVPTXUtilities.h"141#include "llvm/Analysis/ValueTracking.h"142#include "llvm/CodeGen/TargetPassConfig.h"143#include "llvm/IR/Function.h"144#include "llvm/IR/IRBuilder.h"145#include "llvm/IR/Instructions.h"146#include "llvm/IR/IntrinsicsNVPTX.h"147#include "llvm/IR/Module.h"148#include "llvm/IR/Type.h"149#include "llvm/InitializePasses.h"150#include "llvm/Pass.h"151#include <numeric>152#include <queue>153154#define DEBUG_TYPE "nvptx-lower-args"155156using namespace llvm;157158namespace llvm {159void initializeNVPTXLowerArgsPass(PassRegistry &);160}161162namespace {163class NVPTXLowerArgs : public FunctionPass {164bool runOnFunction(Function &F) override;165166bool runOnKernelFunction(const NVPTXTargetMachine &TM, Function &F);167bool runOnDeviceFunction(const NVPTXTargetMachine &TM, Function &F);168169// handle byval parameters170void handleByValParam(const NVPTXTargetMachine &TM, Argument *Arg);171// Knowing Ptr must point to the global address space, this function172// addrspacecasts Ptr to global and then back to generic. 
  // This allows NVPTXInferAddressSpaces to fold the global-to-generic cast
  // into loads/stores that appear later.
  void markPointerAsGlobal(Value *Ptr);

public:
  static char ID; // Pass identification, replacement for typeid
  NVPTXLowerArgs() : FunctionPass(ID) {}
  StringRef getPassName() const override {
    return "Lower pointer arguments of CUDA kernels";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
  }
};
} // namespace

char NVPTXLowerArgs::ID = 1;

INITIALIZE_PASS_BEGIN(NVPTXLowerArgs, "nvptx-lower-args",
                      "Lower arguments (NVPTX)", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(NVPTXLowerArgs, "nvptx-lower-args",
                    "Lower arguments (NVPTX)", false, false)

// =============================================================================
// If the function had a byval struct ptr arg, say foo(%struct.x* byval %d),
// and we can't guarantee that the only accesses are loads,
// then add the following instructions to the first basic block:
//
// %temp = alloca %struct.x, align 8
// %tempd = addrspacecast %struct.x* %d to %struct.x addrspace(101)*
// %tv = load %struct.x addrspace(101)* %tempd
// store %struct.x %tv, %struct.x* %temp, align 8
//
// The above code allocates some space on the stack and copies the incoming
// struct from param space to local space.
// Then replace all occurrences of %d by %temp.
//
// In case we know that all users are GEPs or Loads, replace them with the same
// ones in parameter AS, so we can access them using ld.param (see the
// illustrative sketch below).
// =============================================================================
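//
// As an illustrative sketch (the value names below are invented, not taken
// from an actual test), a GEP/load chain on a byval argument such as
//
//   %gep = getelementptr %struct.x, ptr %d, i64 0, i32 1
//   %v   = load i32, ptr %gep
//
// would, in the all-loads case, be rewritten by convertToParamAS() to use the
// param address space:
//
//   %d.param = addrspacecast ptr %d to ptr addrspace(101)
//   %gep1    = getelementptr %struct.x, ptr addrspace(101) %d.param, i64 0, i32 1
//   %v       = load i32, ptr addrspace(101) %gep1
//
// so that instruction selection can emit ld.param for the load.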

// For Loads, replaces the \p OldUse of the pointer with a Use of the same
// pointer in parameter AS.
// For "escapes" (to memory, a function call, or a ptrtoint), cast the OldUse
// to generic using cvta.param.
static void convertToParamAS(Use *OldUse, Value *Param, bool GridConstant) {
  Instruction *I = dyn_cast<Instruction>(OldUse->getUser());
  assert(I && "OldUse must be in an instruction");
  struct IP {
    Use *OldUse;
    Instruction *OldInstruction;
    Value *NewParam;
  };
  SmallVector<IP> ItemsToConvert = {{OldUse, I, Param}};
  SmallVector<Instruction *> InstructionsToDelete;

  auto CloneInstInParamAS = [GridConstant](const IP &I) -> Value * {
    if (auto *LI = dyn_cast<LoadInst>(I.OldInstruction)) {
      LI->setOperand(0, I.NewParam);
      return LI;
    }
    if (auto *GEP = dyn_cast<GetElementPtrInst>(I.OldInstruction)) {
      SmallVector<Value *, 4> Indices(GEP->indices());
      auto *NewGEP = GetElementPtrInst::Create(
          GEP->getSourceElementType(), I.NewParam, Indices, GEP->getName(),
          GEP->getIterator());
      NewGEP->setIsInBounds(GEP->isInBounds());
      return NewGEP;
    }
    if (auto *BC = dyn_cast<BitCastInst>(I.OldInstruction)) {
      auto *NewBCType = PointerType::get(BC->getContext(), ADDRESS_SPACE_PARAM);
      return BitCastInst::Create(BC->getOpcode(), I.NewParam, NewBCType,
                                 BC->getName(), BC->getIterator());
    }
    if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I.OldInstruction)) {
      assert(ASC->getDestAddressSpace() == ADDRESS_SPACE_PARAM);
      (void)ASC;
      // Just pass through the argument, the old ASC is no longer needed.
      return I.NewParam;
    }

    if (GridConstant) {
      auto GetParamAddrCastToGeneric =
          [](Value *Addr, Instruction *OriginalUser) -> Value * {
        PointerType *ReturnTy =
            PointerType::get(OriginalUser->getContext(), ADDRESS_SPACE_GENERIC);
        Function *CvtToGen = Intrinsic::getDeclaration(
            OriginalUser->getModule(), Intrinsic::nvvm_ptr_param_to_gen,
            {ReturnTy, PointerType::get(OriginalUser->getContext(),
                                        ADDRESS_SPACE_PARAM)});

        // Cast param address to generic address space.
        Value *CvtToGenCall =
            CallInst::Create(CvtToGen, Addr, Addr->getName() + ".gen",
                             OriginalUser->getIterator());
        return CvtToGenCall;
      };

      if (auto *CI = dyn_cast<CallInst>(I.OldInstruction)) {
        I.OldUse->set(GetParamAddrCastToGeneric(I.NewParam, CI));
        return CI;
      }
      if (auto *SI = dyn_cast<StoreInst>(I.OldInstruction)) {
        // The byval address is being stored; cast it to generic.
        if (SI->getValueOperand() == I.OldUse->get())
          SI->setOperand(0, GetParamAddrCastToGeneric(I.NewParam, SI));
        return SI;
      }
      if (auto *PI = dyn_cast<PtrToIntInst>(I.OldInstruction)) {
        if (PI->getPointerOperand() == I.OldUse->get())
          PI->setOperand(0, GetParamAddrCastToGeneric(I.NewParam, PI));
        return PI;
      }
      llvm_unreachable(
          "Instruction unsupported even for grid_constant argument");
    }

    llvm_unreachable("Unsupported instruction");
  };

  while (!ItemsToConvert.empty()) {
    IP I = ItemsToConvert.pop_back_val();
    Value *NewInst = CloneInstInParamAS(I);

    if (NewInst && NewInst != I.OldInstruction) {
      // We've created a new instruction. Queue users of the old instruction to
      // be converted and the instruction itself to be deleted. We can't delete
      // the old instruction yet, because it's still in use by a load somewhere.
      for (Use &U : I.OldInstruction->uses())
        ItemsToConvert.push_back({&U, cast<Instruction>(U.getUser()), NewInst});

      InstructionsToDelete.push_back(I.OldInstruction);
    }
  }

  // Now we know that all argument loads are using addresses in parameter space
  // and we can finally remove the old instructions in generic AS. Instructions
  // scheduled for removal should be processed in reverse order so the ones
  // closest to the load are deleted first. Otherwise they may still be in use.
  // E.g. if we have Value = Load(BitCast(GEP(arg))), InstructionsToDelete will
  // have {GEP, BitCast}. GEP can't be deleted first, because it's still used
  // by the BitCast.
  for (Instruction *I : llvm::reverse(InstructionsToDelete))
    I->eraseFromParent();
}

// Adjust alignment of arguments passed byval in .param address space. We can
// increase the alignment of such arguments in a way that ensures that we can
// effectively vectorize their loads. We should also traverse all loads from
// the byval pointer and adjust their alignment, if they use a known offset.
// Such alignment changes must be kept consistent with the parameter stores and
// loads in NVPTXTargetLowering::LowerCall.
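//
// For instance (an illustrative example, not tied to a specific test): if the
// optimized argument alignment computed below is 16 but a load reads from byte
// offset 12 within the byval struct, the strongest alignment we can claim for
// that load is gcd(16, 12) = 4.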
static void adjustByValArgAlignment(Argument *Arg, Value *ArgInParamAS,
                                    const NVPTXTargetLowering *TLI) {
  Function *Func = Arg->getParent();
  Type *StructType = Arg->getParamByValType();
  const DataLayout DL(Func->getParent());

  uint64_t NewArgAlign =
      TLI->getFunctionParamOptimizedAlign(Func, StructType, DL).value();
  uint64_t CurArgAlign =
      Arg->getAttribute(Attribute::Alignment).getValueAsInt();

  if (CurArgAlign >= NewArgAlign)
    return;

  LLVM_DEBUG(dbgs() << "Try to use alignment " << NewArgAlign << " instead of "
                    << CurArgAlign << " for " << *Arg << '\n');

  auto NewAlignAttr =
      Attribute::get(Func->getContext(), Attribute::Alignment, NewArgAlign);
  Arg->removeAttr(Attribute::Alignment);
  Arg->addAttr(NewAlignAttr);

  struct Load {
    LoadInst *Inst;
    uint64_t Offset;
  };

  struct LoadContext {
    Value *InitialVal;
    uint64_t Offset;
  };

  SmallVector<Load> Loads;
  std::queue<LoadContext> Worklist;
  Worklist.push({ArgInParamAS, 0});
  bool IsGridConstant = isParamGridConstant(*Arg);

  while (!Worklist.empty()) {
    LoadContext Ctx = Worklist.front();
    Worklist.pop();

    for (User *CurUser : Ctx.InitialVal->users()) {
      if (auto *I = dyn_cast<LoadInst>(CurUser)) {
        Loads.push_back({I, Ctx.Offset});
        continue;
      }

      if (auto *I = dyn_cast<BitCastInst>(CurUser)) {
        Worklist.push({I, Ctx.Offset});
        continue;
      }

      if (auto *I = dyn_cast<GetElementPtrInst>(CurUser)) {
        APInt OffsetAccumulated =
            APInt::getZero(DL.getIndexSizeInBits(ADDRESS_SPACE_PARAM));

        if (!I->accumulateConstantOffset(DL, OffsetAccumulated))
          continue;

        uint64_t OffsetLimit = -1;
        uint64_t Offset = OffsetAccumulated.getLimitedValue(OffsetLimit);
        assert(Offset != OffsetLimit && "Expect Offset less than UINT64_MAX");

        Worklist.push({I, Ctx.Offset + Offset});
        continue;
      }

      // Calls, stores, and ptrtoints are supported only for grid_constant
      // arguments; they contribute no loads to adjust.
      if (IsGridConstant &&
          (isa<CallInst>(CurUser) || isa<StoreInst>(CurUser) ||
           isa<PtrToIntInst>(CurUser)))
        continue;

      llvm_unreachable("All users must be one of: load, "
                       "bitcast, getelementptr, call, store, ptrtoint");
    }
  }

  for (Load &CurLoad : Loads) {
    Align NewLoadAlign(std::gcd(NewArgAlign, CurLoad.Offset));
    Align CurLoadAlign(CurLoad.Inst->getAlign());
    CurLoad.Inst->setAlignment(std::max(NewLoadAlign, CurLoadAlign));
  }
}

void NVPTXLowerArgs::handleByValParam(const NVPTXTargetMachine &TM,
                                      Argument *Arg) {
  bool IsGridConstant = isParamGridConstant(*Arg);
  Function *Func = Arg->getParent();
  BasicBlock::iterator FirstInst = Func->getEntryBlock().begin();
  Type *StructType = Arg->getParamByValType();
  assert(StructType && "Missing byval type");

  auto AreSupportedUsers = [&](Value *Start) {
    SmallVector<Value *, 16> ValuesToCheck = {Start};
    auto IsSupportedUse = [IsGridConstant](Value *V) -> bool {
      if (isa<GetElementPtrInst>(V) || isa<BitCastInst>(V) || isa<LoadInst>(V))
        return true;
      // ASCs to param space are OK, too -- we'll just strip them.
      if (auto *ASC = dyn_cast<AddrSpaceCastInst>(V)) {
        if (ASC->getDestAddressSpace() == ADDRESS_SPACE_PARAM)
          return true;
      }
      // Simple calls and stores are supported for grid_constants;
      // writes through these pointers are undefined behaviour.
      if (IsGridConstant &&
          (isa<CallInst>(V) || isa<StoreInst>(V) || isa<PtrToIntInst>(V)))
        return true;
      return false;
    };

    while (!ValuesToCheck.empty()) {
      Value *V = ValuesToCheck.pop_back_val();
      if (!IsSupportedUse(V)) {
        LLVM_DEBUG(dbgs() << "Need a "
                          << (isParamGridConstant(*Arg) ? "cast " : "copy ")
                          << "of " << *Arg << " because of " << *V << "\n");
        (void)Arg;
        return false;
      }
      if (!isa<LoadInst>(V) && !isa<CallInst>(V) && !isa<StoreInst>(V) &&
          !isa<PtrToIntInst>(V))
        llvm::append_range(ValuesToCheck, V->users());
    }
    return true;
  };

  if (llvm::all_of(Arg->users(), AreSupportedUsers)) {
    // Convert all loads and intermediate operations to use parameter AS and
    // skip creation of a local copy of the argument.
    SmallVector<Use *, 16> UsesToUpdate;
    for (Use &U : Arg->uses())
      UsesToUpdate.push_back(&U);

    Value *ArgInParamAS = new AddrSpaceCastInst(
        Arg, PointerType::get(StructType, ADDRESS_SPACE_PARAM), Arg->getName(),
        FirstInst);
    for (Use *U : UsesToUpdate)
      convertToParamAS(U, ArgInParamAS, IsGridConstant);
    LLVM_DEBUG(dbgs() << "No need to copy or cast " << *Arg << "\n");

    const auto *TLI =
        cast<NVPTXTargetLowering>(TM.getSubtargetImpl()->getTargetLowering());

    adjustByValArgAlignment(Arg, ArgInParamAS, TLI);

    return;
  }

  const DataLayout &DL = Func->getDataLayout();
  unsigned AS = DL.getAllocaAddrSpace();
  if (isParamGridConstant(*Arg)) {
    // Writes to a grid constant are undefined behaviour. We do not need a
    // temporary copy. When a pointer might have escaped, conservatively
    // replace all of its uses (which might include a device function call)
    // with a cast to the generic address space.
    IRBuilder<> IRB(&Func->getEntryBlock().front());

    // Cast the argument to param address space.
    auto *CastToParam = cast<AddrSpaceCastInst>(IRB.CreateAddrSpaceCast(
        Arg, IRB.getPtrTy(ADDRESS_SPACE_PARAM), Arg->getName() + ".param"));

    // Cast the param address to generic address space. We do not use an
    // addrspacecast to generic here, because LLVM considers `Arg` to be in the
    // generic address space, and a `generic -> param` cast followed by a
    // `param -> generic` cast will be folded away. The `param -> generic`
    // intrinsic will be correctly lowered to `cvta.param`.
    Value *CvtToGenCall = IRB.CreateIntrinsic(
        IRB.getPtrTy(ADDRESS_SPACE_GENERIC), Intrinsic::nvvm_ptr_param_to_gen,
        CastToParam, nullptr, CastToParam->getName() + ".gen");

    Arg->replaceAllUsesWith(CvtToGenCall);

    // Do not replace Arg in the cast to param space.
    CastToParam->setOperand(0, Arg);
  } else {
    // Otherwise we have to create a temporary copy.
    AllocaInst *AllocA =
        new AllocaInst(StructType, AS, Arg->getName(), FirstInst);
    // Set the alignment to the alignment of the byval parameter. This is
    // because later loads/stores assume that alignment, and we are going to
    // replace the use of the byval parameter with this alloca instruction.
    AllocA->setAlignment(Func->getParamAlign(Arg->getArgNo())
                             .value_or(DL.getPrefTypeAlign(StructType)));
    Arg->replaceAllUsesWith(AllocA);

    Value *ArgInParam = new AddrSpaceCastInst(
        Arg, PointerType::get(Arg->getContext(), ADDRESS_SPACE_PARAM),
        Arg->getName(), FirstInst);
    // Be sure to propagate alignment to this load; LLVM doesn't know that
    // NVPTX addrspacecast preserves alignment. Since params are constant, this
    // load is definitely not volatile.
    LoadInst *LI =
        new LoadInst(StructType, ArgInParam, Arg->getName(),
                     /*isVolatile=*/false, AllocA->getAlign(), FirstInst);
    new StoreInst(LI, AllocA, FirstInst);
  }
}

void NVPTXLowerArgs::markPointerAsGlobal(Value *Ptr) {
  if (Ptr->getType()->getPointerAddressSpace() != ADDRESS_SPACE_GENERIC)
    return;

  // Deciding where to emit the addrspacecast pair.
  BasicBlock::iterator InsertPt;
  if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    // Insert at the function entry if Ptr is an argument.
    InsertPt = Arg->getParent()->getEntryBlock().begin();
  } else {
    // Insert right after Ptr if Ptr is an instruction.
    InsertPt = ++cast<Instruction>(Ptr)->getIterator();
    assert(InsertPt != InsertPt->getParent()->end() &&
           "We don't call this function with Ptr being a terminator.");
  }

  Instruction *PtrInGlobal = new AddrSpaceCastInst(
      Ptr, PointerType::get(Ptr->getContext(), ADDRESS_SPACE_GLOBAL),
      Ptr->getName(), InsertPt);
  Value *PtrInGeneric = new AddrSpaceCastInst(PtrInGlobal, Ptr->getType(),
                                              Ptr->getName(), InsertPt);
  // Replace all uses of Ptr with PtrInGeneric, except for PtrInGlobal itself.
  Ptr->replaceAllUsesWith(PtrInGeneric);
  PtrInGlobal->setOperand(0, Ptr);
}

// =============================================================================
// Main function for this pass.
// =============================================================================
bool NVPTXLowerArgs::runOnKernelFunction(const NVPTXTargetMachine &TM,
                                         Function &F) {
  // Copying of byval aggregates + SROA may result in pointers being loaded as
  // integers, followed by inttoptr. We may want to mark those as global, too,
  // but only if the loaded integer is used exclusively for conversion to a
  // pointer with inttoptr.
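  //
  // For instance (illustrative IR with invented value names), a pointer field
  // of a byval struct may reach its use as
  //
  //   %iv  = load i64, ptr %b_ptr
  //   %ptr = inttoptr i64 %iv to ptr
  //
  // in which case we mark each inttoptr result as global rather than the
  // integer load itself.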
  auto HandleIntToPtr = [this](Value &V) {
    if (llvm::all_of(V.users(), [](User *U) { return isa<IntToPtrInst>(U); })) {
      SmallVector<User *, 16> UsersToUpdate(V.users());
      for (User *U : UsersToUpdate)
        markPointerAsGlobal(U);
    }
  };
  if (TM.getDrvInterface() == NVPTX::CUDA) {
    // Mark pointers in byval structs as global.
    for (auto &B : F) {
      for (auto &I : B) {
        if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
          if (LI->getType()->isPointerTy() || LI->getType()->isIntegerTy()) {
            Value *UO = getUnderlyingObject(LI->getPointerOperand());
            if (Argument *Arg = dyn_cast<Argument>(UO)) {
              if (Arg->hasByValAttr()) {
                // LI is a load from a pointer within a byval kernel parameter.
                if (LI->getType()->isPointerTy())
                  markPointerAsGlobal(LI);
                else
                  HandleIntToPtr(*LI);
              }
            }
          }
        }
      }
    }
  }

  LLVM_DEBUG(dbgs() << "Lowering kernel args of " << F.getName() << "\n");
  for (Argument &Arg : F.args()) {
    if (Arg.getType()->isPointerTy()) {
      if (Arg.hasByValAttr())
        handleByValParam(TM, &Arg);
      else if (TM.getDrvInterface() == NVPTX::CUDA)
        markPointerAsGlobal(&Arg);
    } else if (Arg.getType()->isIntegerTy() &&
               TM.getDrvInterface() == NVPTX::CUDA) {
      HandleIntToPtr(Arg);
    }
  }
  return true;
}

// Device functions only need to copy byval args into local memory.
bool NVPTXLowerArgs::runOnDeviceFunction(const NVPTXTargetMachine &TM,
                                         Function &F) {
  LLVM_DEBUG(dbgs() << "Lowering function args of " << F.getName() << "\n");
  for (Argument &Arg : F.args())
    if (Arg.getType()->isPointerTy() && Arg.hasByValAttr())
      handleByValParam(TM, &Arg);
  return true;
}

bool NVPTXLowerArgs::runOnFunction(Function &F) {
  auto &TM = getAnalysis<TargetPassConfig>().getTM<NVPTXTargetMachine>();

  return isKernelFunction(F) ? runOnKernelFunction(TM, F)
                             : runOnDeviceFunction(TM, F);
}

FunctionPass *llvm::createNVPTXLowerArgsPass() { return new NVPTXLowerArgs(); }