12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647 |
- /* -*- c -*-
- ----------------------------------------------------------------
- Notice that the following BSD-style license applies to this one
- file (valgrind.h) only. The rest of Valgrind is licensed under the
- terms of the GNU General Public License, version 2, unless
- otherwise indicated. See the COPYING file in the source
- distribution for details.
- ----------------------------------------------------------------
- This file is part of Valgrind, a dynamic binary instrumentation
- framework.
- Copyright (C) 2000-2017 Julian Seward. All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
- documentation would be appreciated but is not required.
- 3. Altered source versions must be plainly marked as such, and must
- not be misrepresented as being the original software.
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- ----------------------------------------------------------------
- Notice that the above BSD-style license applies to this one file
- (valgrind.h) only. The entire rest of Valgrind is licensed under
- the terms of the GNU General Public License, version 2. See the
- COPYING file in the source distribution for details.
- ----------------------------------------------------------------
- */
- /* This file is for inclusion into client (your!) code.
- You can use these macros to manipulate and query Valgrind's
- execution inside your own programs.
- The resulting executables will still run without Valgrind, just a
- little bit more slowly than they otherwise would, but otherwise
- unchanged. When not running on valgrind, each client request
- consumes very few (eg. 7) instructions, so the resulting performance
- loss is negligible unless you plan to execute client requests
- millions of times per second. Nevertheless, if that is still a
- problem, you can compile with the NVALGRIND symbol defined (gcc
- -DNVALGRIND) so that client requests are not even compiled in. */
/* Multiple-inclusion guard; closed at the very end of the file
   (beyond this chunk). */
#ifndef __VALGRIND_H
#define __VALGRIND_H

/* ------------------------------------------------------------------ */
/* VERSION NUMBER OF VALGRIND                                          */
/* ------------------------------------------------------------------ */

/* Specify Valgrind's version number, so that user code can
   conditionally compile based on our version number.  Note that these
   were introduced at version 3.6 and so do not exist in version 3.5
   or earlier.  The recommended way to use them to check for "version
   X.Y or later" is (eg)

   #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__)   \
       && (__VALGRIND_MAJOR__ > 3                                   \
           || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
*/
#define __VALGRIND_MAJOR__    3
#define __VALGRIND_MINOR__    15

/* Needed for the va_list-based client-request helpers later in the
   file (not visible in this chunk). */
#include <stdarg.h>
/* Nb: this file might be included in a file compiled with -ansi.  So
   we can't use C++ style "//" comments nor the "asm" keyword (instead
   use "__asm__"). */

/* Derive some tags indicating what the target platform is.  Note
   that in this file we're using the compiler's CPP symbols for
   identifying architectures, which are different to the ones we use
   within the rest of Valgrind.  Note, __powerpc__ is active for both
   32 and 64-bit PPC, whereas __powerpc64__ is only active for the
   latter (on Linux, that is).

   Misc note: how to find out what's predefined in gcc by default:
   gcc -Wp,-dM somefile.c
*/

/* Start from a clean slate: after the #if chain below, exactly one
   PLAT_* tag is defined -- or none at all, in which case NVALGRIND is
   forced on so no inline asm is emitted for an unknown target. */
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64be_linux
#undef PLAT_ppc64le_linux
#undef PLAT_arm_linux
#undef PLAT_arm64_linux
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris


#if defined(__APPLE__) && defined(__i386__)
#  define PLAT_x86_darwin 1
#elif defined(__APPLE__) && defined(__x86_64__)
#  define PLAT_amd64_darwin 1
/* Windows 32-bit: MinGW (but not MinGW-w64), Cygwin, or MSVC/x86. */
#elif (defined(__MINGW32__) && !defined(__MINGW64__)) \
      || defined(__CYGWIN32__)                        \
      || (defined(_WIN32) && defined(_M_IX86))
#  define PLAT_x86_win32 1
#elif defined(__MINGW64__) \
      || (defined(_WIN64) && defined(_M_X64))
#  define PLAT_amd64_win64 1
#elif defined(__linux__) && defined(__i386__)
#  define PLAT_x86_linux 1
/* x32 (__ILP32__ on x86_64) is deliberately excluded. */
#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__)
#  define PLAT_amd64_linux 1
#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
#  define PLAT_ppc32_linux 1
#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2
/* Big Endian uses ELF version 1 */
#  define PLAT_ppc64be_linux 1
#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2
/* Little Endian uses ELF version 2 */
#  define PLAT_ppc64le_linux 1
#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__)
#  define PLAT_arm_linux 1
#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__)
#  define PLAT_arm64_linux 1
#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
#  define PLAT_s390x_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips==64)
#  define PLAT_mips64_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips!=64)
#  define PLAT_mips32_linux 1
#elif defined(__sun) && defined(__i386__)
#  define PLAT_x86_solaris 1
#elif defined(__sun) && defined(__x86_64__)
#  define PLAT_amd64_solaris 1
#else
/* If we're not compiling for our target platform, don't generate
   any inline asms. */
#  if !defined(NVALGRIND)
#    define NVALGRIND 1
#  endif
#endif
- /* ------------------------------------------------------------------ */
- /* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
- /* in here of use to end-users -- skip to the next section. */
- /* ------------------------------------------------------------------ */
- /*
- * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
- * request. Accepts both pointers and integers as arguments.
- *
- * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind
- * client request that does not return a value.
- * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
- * client request and whose value equals the client request result. Accepts
- * both pointers and integers as arguments. Note that such calls are not
- * necessarily pure functions -- they may have side effects.
- */
- /* Statement wrapper: evaluates the EXPR form and assigns the result to
-    _zzq_rlval.  Kept for backward compatibility with older clients. */
- #define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
- _zzq_request, _zzq_arg1, _zzq_arg2, \
- _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
- (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
- (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
- /* Statement wrapper that discards the result (default value 0). */
- #define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \
- _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
- (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
- #if defined(NVALGRIND)
- /* Define NVALGRIND to completely remove the Valgrind magic sequence
- from the compiled code (analogous to NDEBUG's effects on
- assert()) */
- /* NB: in this stub only _zzq_default is evaluated; the request code and
-    argument expressions are not evaluated at all, so they must not be
-    relied on for side effects. */
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- (_zzq_default)
- #else /* ! NVALGRIND */
- /* The following defines the magic code sequences which the JITter
- spots and handles magically. Don't look too closely at them as
- they will rot your brain.
- The assembly code sequences for all architectures are in this one
- file. This is because this file must be stand-alone, and we don't
- want to have multiple files.
- For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
- value gets put in the return slot, so that everything works when
- this is executed not under Valgrind. Args are passed in a memory
- block, and so there's no intrinsic limit to the number that could
- be passed, but it's currently five.
-
- The macro args are:
- _zzq_rlval result lvalue
- _zzq_default default value (result returned when running on real CPU)
- _zzq_request request code
- _zzq_arg1..5 request params
- The other two macros are used to support function wrapping, and are
- a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
- guest's NRADDR pseudo-register and whatever other information is
- needed to safely call the original function from the wrapper: on
- ppc64-linux, the R2 value at the divert point is also needed. This
- information is abstracted into a user-visible type, OrigFn.
- VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
- guest, but guarantees that the branch instruction will not be
- redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
- branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
- complete inline asm, since it needs to be combined with more magic
- inline asm stuff to be useful.
- */
- /* ----------------- x86-{linux,darwin,solaris} ---------------- */
- #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
- || (defined(PLAT_x86_win32) && defined(__GNUC__)) \
- || defined(PLAT_x86_solaris)
- typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
- /* The four rotate amounts sum to 64, i.e. 0 mod 32, so %edi is left
-    unchanged: the preamble is a no-op on a real CPU but is the exact
-    instruction sequence the Valgrind JIT pattern-matches.  Do not
-    reorder or reformat these instructions. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "roll $3, %%edi ; roll $13, %%edi\n\t" \
- "roll $29, %%edi ; roll $19, %%edi\n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EDX = client_request ( %EAX ) */ \
- "xchgl %%ebx,%%ebx" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EAX = guest_NRADDR */ \
- "xchgl %%ecx,%%ecx" \
- : "=a" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
- /* Text fragment only -- must be combined with further asm (see the
-    CALL_FN_ macros below); not a complete asm statement by itself. */
- #define VALGRIND_CALL_NOREDIR_EAX \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%EAX */ \
- "xchgl %%edx,%%edx\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "xchgl %%edi,%%edi\n\t" \
- : : : "cc", "memory" \
- ); \
- } while (0)
- #endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__)
- || PLAT_x86_solaris */
- /* ------------------------- x86-Win32 ------------------------- */
- /* MSVC variant: same magic rotate-preamble as the gcc x86 version, but
-    expressed with MSVC __asm blocks, and the client-request sequence is
-    wrapped in a real (inline) function because MSVC inline asm cannot
-    appear inside an expression. */
- #if defined(PLAT_x86_win32) && !defined(__GNUC__)
- typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
- #if defined(_MSC_VER)
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- __asm rol edi, 3 __asm rol edi, 13 \
- __asm rol edi, 29 __asm rol edi, 19
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
- (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
- (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
- (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
- static __inline uintptr_t
- valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
- uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
- uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
- uintptr_t _zzq_arg5)
- {
- volatile uintptr_t _zzq_args[6];
- volatile unsigned int _zzq_result;
- _zzq_args[0] = (uintptr_t)(_zzq_request);
- _zzq_args[1] = (uintptr_t)(_zzq_arg1);
- _zzq_args[2] = (uintptr_t)(_zzq_arg2);
- _zzq_args[3] = (uintptr_t)(_zzq_arg3);
- _zzq_args[4] = (uintptr_t)(_zzq_arg4);
- _zzq_args[5] = (uintptr_t)(_zzq_arg5);
- __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
- __SPECIAL_INSTRUCTION_PREAMBLE
- /* %EDX = client_request ( %EAX ) */
- __asm xchg ebx,ebx
- __asm mov _zzq_result, edx
- }
- return _zzq_result;
- }
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %EAX = guest_NRADDR */ \
- __asm xchg ecx,ecx \
- __asm mov __addr, eax \
- } \
- _zzq_orig->nraddr = __addr; \
- }
- /* No-redirect calls are not expressible in MSVC inline asm text
-    fragments; any use is a compile-time error by design. */
- #define VALGRIND_CALL_NOREDIR_EAX ERROR
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
- __asm xchg edi,edi \
- } \
- } while (0)
- #else
- #error Unsupported compiler.
- #endif
- #endif /* PLAT_x86_win32 */
- /* ----------------- amd64-{linux,darwin,solaris} --------------- */
- #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
- || defined(PLAT_amd64_solaris) \
- || (defined(PLAT_amd64_win64) && defined(__GNUC__))
- typedef
- struct {
- unsigned long int nraddr; /* where's the code? */
- }
- OrigFn;
- /* Rotate amounts sum to 128, i.e. 0 mod 64: %rdi is unchanged.  This
-    no-op sequence is the exact pattern the Valgrind JIT recognises --
-    do not reorder or alter it. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
- "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({ volatile unsigned long int _zzq_args[6]; \
- volatile unsigned long int _zzq_result; \
- _zzq_args[0] = (unsigned long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %RDX = client_request ( %RAX ) */ \
- "xchgq %%rbx,%%rbx" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %RAX = guest_NRADDR */ \
- "xchgq %%rcx,%%rcx" \
- : "=a" (__addr) \
- : \
- : "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
- /* Text fragment only -- combined with further asm in CALL_FN_ below. */
- #define VALGRIND_CALL_NOREDIR_RAX \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%RAX */ \
- "xchgq %%rdx,%%rdx\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "xchgq %%rdi,%%rdi\n\t" \
- : : : "cc", "memory" \
- ); \
- } while (0)
- #endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris
- || (PLAT_amd64_win64 && __GNUC__) */
- /* ------------------------- amd64-Win64 ------------------------- */
- #if defined(PLAT_amd64_win64) && !defined(__GNUC__)
- #error Unsupported compiler.
- #endif /* PLAT_amd64_win64 */
- /* ------------------------ ppc32-linux ------------------------ */
- #if defined(PLAT_ppc32_linux)
- typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
- /* Rotations of r0 by 3+13+29+19 = 64 bits (0 mod 32) leave r0
-    unchanged: a no-op marker sequence recognised by the JIT. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \
- "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({ unsigned int _zzq_args[6]; \
- unsigned int _zzq_result; \
- unsigned int* _zzq_ptr; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 3,%1\n\t" /*default*/ \
- "mr 4,%2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" /*result*/ \
- : "=b" (_zzq_result) \
- : "b" (_zzq_default), "b" (_zzq_ptr) \
- : "cc", "memory", "r3", "r4"); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
- #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or 5,5,5\n\t" \
- ); \
- } while (0)
- #endif /* PLAT_ppc32_linux */
- /* ------------------------ ppc64-linux ------------------------ */
- #if defined(PLAT_ppc64be_linux)
- typedef
- struct {
- unsigned long int nraddr; /* where's the code? */
- unsigned long int r2; /* what tocptr do we need? */
- }
- OrigFn;
- /* rotldi amounts sum to 128 (0 mod 64): r0 unchanged; JIT marker. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({ unsigned long int _zzq_args[6]; \
- unsigned long int _zzq_result; \
- unsigned long int* _zzq_ptr; \
- _zzq_args[0] = (unsigned long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 3,%1\n\t" /*default*/ \
- "mr 4,%2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" /*result*/ \
- : "=b" (_zzq_result) \
- : "b" (_zzq_default), "b" (_zzq_ptr) \
- : "cc", "memory", "r3", "r4"); \
- _zzq_result; \
- })
- /* On ppc64 the wrapper also needs the R2 (TOC pointer) value at the
-    divert point, hence the second magic sequence filling OrigFn.r2. */
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
- #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or 5,5,5\n\t" \
- ); \
- } while (0)
- #endif /* PLAT_ppc64be_linux */
- /* ppc64 little-endian (ELFv2): identical to the BE variant above except
-    that the no-redirect branch target register is R12 rather than R11. */
- #if defined(PLAT_ppc64le_linux)
- typedef
- struct {
- unsigned long int nraddr; /* where's the code? */
- unsigned long int r2; /* what tocptr do we need? */
- }
- OrigFn;
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({ unsigned long int _zzq_args[6]; \
- unsigned long int _zzq_result; \
- unsigned long int* _zzq_ptr; \
- _zzq_args[0] = (unsigned long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 3,%1\n\t" /*default*/ \
- "mr 4,%2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" /*result*/ \
- : "=b" (_zzq_result) \
- : "b" (_zzq_default), "b" (_zzq_ptr) \
- : "cc", "memory", "r3", "r4"); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
- #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R12 */ \
- "or 3,3,3\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or 5,5,5\n\t" \
- ); \
- } while (0)
- #endif /* PLAT_ppc64le_linux */
- /* ------------------------- arm-linux ------------------------- */
- #if defined(PLAT_arm_linux)
- typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
- /* Rotations of r12 by 3+13+29+19 = 64 (0 mod 32) leave r12 unchanged:
-    the no-op marker sequence the JIT recognises. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
- "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile("mov r3, %1\n\t" /*default*/ \
- "mov r4, %2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* R3 = client_request ( R4 ) */ \
- "orr r10, r10, r10\n\t" \
- "mov %0, r3" /*result*/ \
- : "=r" (_zzq_result) \
- : "r" (_zzq_default), "r" (&_zzq_args[0]) \
- : "cc","memory", "r3", "r4"); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* R3 = guest_NRADDR */ \
- "orr r11, r11, r11\n\t" \
- "mov %0, r3" \
- : "=r" (__addr) \
- : \
- : "cc", "memory", "r3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
- #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R4 */ \
- "orr r12, r12, r12\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "orr r9, r9, r9\n\t" \
- : : : "cc", "memory" \
- ); \
- } while (0)
- #endif /* PLAT_arm_linux */
- /* ------------------------ arm64-linux ------------------------- */
- #if defined(PLAT_arm64_linux)
- typedef
- struct {
- unsigned long int nraddr; /* where's the code? */
- }
- OrigFn;
- /* Rotations of x12 by 3+13+51+61 = 128 (0 mod 64) leave x12 unchanged:
-    the no-op marker sequence the JIT recognises. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \
- "ror x12, x12, #51 ; ror x12, x12, #61 \n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- __extension__ \
- ({volatile unsigned long int _zzq_args[6]; \
- volatile unsigned long int _zzq_result; \
- _zzq_args[0] = (unsigned long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
- __asm__ volatile("mov x3, %1\n\t" /*default*/ \
- "mov x4, %2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* X3 = client_request ( X4 ) */ \
- "orr x10, x10, x10\n\t" \
- "mov %0, x3" /*result*/ \
- : "=r" (_zzq_result) \
- : "r" ((unsigned long int)(_zzq_default)), \
- "r" (&_zzq_args[0]) \
- : "cc","memory", "x3", "x4"); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- unsigned long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* X3 = guest_NRADDR */ \
- "orr x11, x11, x11\n\t" \
- "mov %0, x3" \
- : "=r" (__addr) \
- : \
- : "cc", "memory", "x3" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
- #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir X8 */ \
- "orr x12, x12, x12\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "orr x9, x9, x9\n\t" \
- : : : "cc", "memory" \
- ); \
- } while (0)
- #endif /* PLAT_arm64_linux */
- /* ------------------------ s390x-linux ------------------------ */
- #if defined(PLAT_s390x_linux)
- typedef
- struct {
- unsigned long int nraddr; /* where's the code? */
- }
- OrigFn;
- /* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
- * code. This detection is implemented in platform specific toIR.c
- * (e.g. VEX/priv/guest_s390_decoder.c).
- */
- /* Each "lr N,N" is a register-to-itself load, i.e. a no-op; the JIT
-    matches the preamble plus one trailing no-op selecting the action. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "lr 15,15\n\t" \
- "lr 1,1\n\t" \
- "lr 2,2\n\t" \
- "lr 3,3\n\t"
- #define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
- #define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
- #define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
- #define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({volatile unsigned long int _zzq_args[6]; \
- volatile unsigned long int _zzq_result; \
- _zzq_args[0] = (unsigned long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
- __asm__ volatile(/* r2 = args */ \
- "lgr 2,%1\n\t" \
- /* r3 = default */ \
- "lgr 3,%2\n\t" \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- __CLIENT_REQUEST_CODE \
- /* results = r3 */ \
- "lgr %0, 3\n\t" \
- : "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "2", "3", "memory" \
- ); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- __GET_NR_CONTEXT_CODE \
- "lgr %0, 3\n\t" \
- : "=a" (__addr) \
- : \
- : "cc", "3", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
- #define VALGRIND_CALL_NOREDIR_R1 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- __CALL_NO_REDIR_CODE
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- __VEX_INJECT_IR_CODE); \
- } while (0)
- #endif /* PLAT_s390x_linux */
- /* ------------------------- mips32-linux ---------------- */
- #if defined(PLAT_mips32_linux)
- typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
- /* .word 0x342
- * .word 0x742
- * .word 0xC2
- * .word 0x4C2*/
- /* Shifts of $0 (the hard-wired zero register) are architectural no-ops;
-    the JIT recognises this exact four-instruction sequence. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "srl $0, $0, 13\n\t" \
- "srl $0, $0, 29\n\t" \
- "srl $0, $0, 3\n\t" \
- "srl $0, $0, 19\n\t"
-
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({ volatile unsigned int _zzq_args[6]; \
- volatile unsigned int _zzq_result; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int)(_zzq_arg5); \
- __asm__ volatile("move $11, %1\n\t" /*default*/ \
- "move $12, %2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* $11 (t3) = client_request ( $12 (t4) ) */ \
- "or $13, $13, $13\n\t" \
- "move %0, $11\n\t" /*result*/ \
- : "=r" (_zzq_result) \
- : "r" (_zzq_default), "r" (&_zzq_args[0]) \
- : "$11", "$12", "memory"); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* $11 (t3) = guest_NRADDR */ \
- "or $14, $14, $14\n\t" \
- "move %0, $11" /*result*/ \
- : "=r" (__addr) \
- : \
- : "$11" \
- ); \
- _zzq_orig->nraddr = __addr; \
- }
- #define VALGRIND_CALL_NOREDIR_T9 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir *%t9 */ \
- "or $15, $15, $15\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or $11, $11, $11\n\t" \
- ); \
- } while (0)
- #endif /* PLAT_mips32_linux */
- /* ------------------------- mips64-linux ---------------- */
- #if defined(PLAT_mips64_linux)
- typedef
- struct {
- unsigned long nraddr; /* where's the code? */
- }
- OrigFn;
- /* dsll $0,$0, 3
- * dsll $0,$0, 13
- * dsll $0,$0, 29
- * dsll $0,$0, 19*/
- /* dsll of $0 (hard-wired zero) is an architectural no-op; this exact
-    sequence is the 64-bit MIPS marker the JIT recognises. */
- #define __SPECIAL_INSTRUCTION_PREAMBLE \
- "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \
- "dsll $0,$0,29 ; dsll $0,$0,19\n\t"
- #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- __extension__ \
- ({ volatile unsigned long int _zzq_args[6]; \
- volatile unsigned long int _zzq_result; \
- _zzq_args[0] = (unsigned long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
- __asm__ volatile("move $11, %1\n\t" /*default*/ \
- "move $12, %2\n\t" /*ptr*/ \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* $11 = client_request ( $12 ) */ \
- "or $13, $13, $13\n\t" \
- "move %0, $11\n\t" /*result*/ \
- : "=r" (_zzq_result) \
- : "r" (_zzq_default), "r" (&_zzq_args[0]) \
- : "$11", "$12", "memory"); \
- _zzq_result; \
- })
- #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long int __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* $11 = guest_NRADDR */ \
- "or $14, $14, $14\n\t" \
- "move %0, $11" /*result*/ \
- : "=r" (__addr) \
- : \
- : "$11"); \
- _zzq_orig->nraddr = __addr; \
- }
- #define VALGRIND_CALL_NOREDIR_T9 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* call-noredir $25 */ \
- "or $15, $15, $15\n\t"
- #define VALGRIND_VEX_INJECT_IR() \
- do { \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- "or $11, $11, $11\n\t" \
- ); \
- } while (0)
- #endif /* PLAT_mips64_linux */
- /* Insert assembly code for other platforms here... */
- #endif /* NVALGRIND */
- /* ------------------------------------------------------------------ */
- /* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
- /* ugly. It's the least-worst tradeoff I can think of. */
- /* ------------------------------------------------------------------ */
- /* This section defines magic (a.k.a appalling-hack) macros for doing
- guaranteed-no-redirection macros, so as to get from function
- wrappers to the functions they are wrapping. The whole point is to
- construct standard call sequences, but to do the call itself with a
- special no-redirect call pseudo-instruction that the JIT
- understands and handles specially. This section is long and
- repetitious, and I can't see a way to make it shorter.
- The naming scheme is as follows:
- CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
- 'W' stands for "word" and 'v' for "void". Hence there are
- different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
- and for each, the possibility of returning a word-typed result, or
- no result.
- */
- /* Use these to write the name of your wrapper. NOTE: duplicates
- VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
- the default behaviour equivalence class tag "0000" into the name.
- See pub_tool_redir.h for details -- normally you don't need to
- think about this, though. */
- /* Use an extra level of macroisation so as to ensure the soname/fnname
- args are fully macro-expanded before pasting them together. */
- #define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
- /* ZU = soname is Z-encoded, fnname is unencoded;
-    ZZ = both soname and fnname are Z-encoded (see pub_tool_redir.h). */
- #define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
- VG_CONCAT4(_vgw00000ZU_,soname,_,fnname)
- #define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
- VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname)
- /* Use this macro from within a wrapper function to collect the
- context (address and possibly other info) of the original function.
- Once you have that you can then use it in one of the CALL_FN_
- macros. The type of the argument _lval is OrigFn. */
- #define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
- /* Also provide end-user facilities for function replacement, rather
- than wrapping. A replacement function differs from a wrapper in
- that it has no way to get hold of the original function being
- called, and hence no way to call onwards to it. In a replacement
- function, VALGRIND_GET_ORIG_FN always returns zero. */
- #define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \
- VG_CONCAT4(_vgr00000ZU_,soname,_,fnname)
- #define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \
- VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname)
- /* Derivatives of the main macros below, for calling functions
- returning void. */
- /* Each simply calls the word-returning CALL_FN_W_* counterpart
-    (defined per-architecture below) and discards the result. */
- #define CALL_FN_v_v(fnptr) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_v(_junk,fnptr); } while (0)
- #define CALL_FN_v_W(fnptr, arg1) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
- #define CALL_FN_v_WW(fnptr, arg1,arg2) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
- #define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
- #define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
- #define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
- #define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
- #define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
- do { volatile unsigned long _junk; \
- CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
- /* ----------------- x86-{linux,darwin,solaris} ---------------- */
- #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
- || defined(PLAT_x86_solaris)
- /* These regs are trashed by the hidden call. No need to mention eax
- as gcc can already see that, plus causes gcc to bomb. */
- #define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
- /* Macros to save and align the stack before making a function
- call and restore it afterwards as gcc may not keep the stack
- pointer aligned if it doesn't realise calls are being made
- to other functions. */
- /* The original %esp is saved in %edi (hence "edi" appears in the
-    clobber list of every CALL_FN_ macro below) and restored from it. */
- #define VALGRIND_ALIGN_STACK \
- "movl %%esp,%%edi\n\t" \
- "andl $0xfffffff0,%%esp\n\t"
- #define VALGRIND_RESTORE_STACK \
- "movl %%edi,%%esp\n\t"
- /* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
- long) == 4. */
- /* Call a 0-argument function with no redirection; result in lval.
-    _argvec[0] holds the target address taken from OrigFn.nraddr. */
- #define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- /* 1-argument call.  "subl $12" plus one 4-byte push keeps %esp
-    16-byte aligned at the point of call. */
- #define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $12, %%esp\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- /* 2-argument call.  "subl $8" plus two pushes = 16 bytes: alignment
-    preserved.  Args are pushed right-to-left per the cdecl convention. */
- #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $8, %%esp\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $4, %%esp\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $12, %%esp\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $8, %%esp\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $4, %%esp\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $12, %%esp\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $8, %%esp\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "subl $4, %%esp\n\t" \
- "pushl 44(%%eax)\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "pushl 48(%%eax)\n\t" \
- "pushl 44(%%eax)\n\t" \
- "pushl 40(%%eax)\n\t" \
- "pushl 36(%%eax)\n\t" \
- "pushl 32(%%eax)\n\t" \
- "pushl 28(%%eax)\n\t" \
- "pushl 24(%%eax)\n\t" \
- "pushl 20(%%eax)\n\t" \
- "pushl 16(%%eax)\n\t" \
- "pushl 12(%%eax)\n\t" \
- "pushl 8(%%eax)\n\t" \
- "pushl 4(%%eax)\n\t" \
- "movl (%%eax), %%eax\n\t" /* target->%eax */ \
- VALGRIND_CALL_NOREDIR_EAX \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */
- /* ---------------- amd64-{linux,darwin,solaris} --------------- */
- #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
- || defined(PLAT_amd64_solaris)
- /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
- /* These regs are trashed by the hidden call. */
- #define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
- "rdi", "r8", "r9", "r10", "r11"
- /* This is all pretty complex. It's so as to make stack unwinding
- work reliably. See bug 243270. The basic problem is the sub and
- add of 128 of %rsp in all of the following macros. If gcc believes
- the CFA is in %rsp, then unwinding may fail, because what's at the
- CFA is not what gcc "expected" when it constructs the CFIs for the
- places where the macros are instantiated.
- But we can't just add a CFI annotation to increase the CFA offset
- by 128, to match the sub of 128 from %rsp, because we don't know
- whether gcc has chosen %rsp as the CFA at that point, or whether it
- has chosen some other register (eg, %rbp). In the latter case,
- adding a CFI annotation to change the CFA offset is simply wrong.
- So the solution is to get hold of the CFA using
- __builtin_dwarf_cfa(), put it in a known register, and add a
- CFI annotation to say what the register is. We choose %rbp for
- this (perhaps perversely), because:
- (1) %rbp is already subject to unwinding. If a new register was
- chosen then the unwinder would have to unwind it in all stack
- traces, which is expensive, and
- (2) %rbp is already subject to precise exception updates in the
- JIT. If a new register was chosen, we'd have to have precise
- exceptions for it too, which reduces performance of the
- generated code.
- However .. one extra complication. We can't just whack the result
- of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
- list of trashed registers at the end of the inline assembly
- fragments; gcc won't allow %rbp to appear in that list. Hence
- instead we need to stash %rbp in %r15 for the duration of the asm,
- and say that %r15 is trashed instead. gcc seems happy to go with
- that.
- Oh .. and this all needs to be conditionalised so that it is
- unchanged from before this commit, when compiled with older gccs
- that don't support __builtin_dwarf_cfa. Furthermore, since
- this header file is freestanding, it has to be independent of
- config.h, and so the following conditionalisation cannot depend on
- configure time checks.
- Although it's not clear from
- 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
- this expression excludes Darwin.
- .cfi directives in Darwin assembly appear to be completely
- different and I haven't investigated how they work.
- For even more entertainment value, note we have to use the
- completely undocumented __builtin_dwarf_cfa(), which appears to
- really compute the CFA, whereas __builtin_frame_address(0) claims
- to but actually doesn't. See
- https://bugs.kde.org/show_bug.cgi?id=243270#c47
- */
- #if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
- # define __FRAME_POINTER \
- ,"r"(__builtin_dwarf_cfa())
- # define VALGRIND_CFI_PROLOGUE \
- "movq %%rbp, %%r15\n\t" \
- "movq %2, %%rbp\n\t" \
- ".cfi_remember_state\n\t" \
- ".cfi_def_cfa rbp, 0\n\t"
- # define VALGRIND_CFI_EPILOGUE \
- "movq %%r15, %%rbp\n\t" \
- ".cfi_restore_state\n\t"
- #else
- # define __FRAME_POINTER
- # define VALGRIND_CFI_PROLOGUE
- # define VALGRIND_CFI_EPILOGUE
- #endif
- /* Macros to save and align the stack before making a function
- call and restore it afterwards as gcc may not keep the stack
- pointer aligned if it doesn't realise calls are being made
- to other functions. */
- #define VALGRIND_ALIGN_STACK \
- "movq %%rsp,%%r14\n\t" \
- "andq $0xfffffffffffffff0,%%rsp\n\t"
- #define VALGRIND_RESTORE_STACK \
- "movq %%r14,%%rsp\n\t"
- /* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
- long) == 8. */
- /* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
- macros. In order not to trash the stack redzone, we need to drop
- %rsp by 128 before the hidden call, and restore afterwards. The
- nastyness is that it is only by luck that the stack still appears
- to be unwindable during the hidden call - since then the behaviour
- of any routine using this macro does not match what the CFI data
- says. Sigh.
- Why is this important? Imagine that a wrapper has a stack
- allocated local, and passes to the hidden call, a pointer to it.
- Because gcc does not know about the hidden call, it may allocate
- that local in the redzone. Unfortunately the hidden call may then
- trash it before it comes to use it. So we must step clear of the
- redzone, for the duration of the hidden call, to make it safe.
- Probably the same problem afflicts the other redzone-style ABIs too
- (ppc64-linux); but for those, the stack is
- self describing (none of this CFI nonsense) so at least messing
- with the stack pointer doesn't give a danger of non-unwindable
- stack. */
- #define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $136,%%rsp\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $136,%%rsp\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $136,%%rsp\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- VALGRIND_ALIGN_STACK \
- "subq $128,%%rsp\n\t" \
- "pushq 96(%%rax)\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- VALGRIND_RESTORE_STACK \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */
/* ------------------------ ppc32-linux ------------------------ */

#if defined(PLAT_ppc32_linux)

/* This is useful for finding out about the on-stack stuff:

   extern int f9  ( int,int,int,int,int,int,int,int,int );
   extern int f10 ( int,int,int,int,int,int,int,int,int,int );
   extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
   extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );

   int g9 ( void ) {
      return f9(11,22,33,44,55,66,77,88,99);
   }
   int g10 ( void ) {
      return f10(11,22,33,44,55,66,77,88,99,110);
   }
   int g11 ( void ) {
      return f11(11,22,33,44,55,66,77,88,99,110,121);
   }
   int g12 ( void ) {
      return f12(11,22,33,44,55,66,77,88,99,110,121,132);
   }
*/

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
   "lr", "ctr", "xer", \
   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
   "r11", "r12", "r13"

/* Macros to save and align the stack before making a function
   call and restore it afterwards as gcc may not keep the stack
   pointer aligned if it doesn't realise calls are being made
   to other functions. */

/* ALIGN: save r1 (the stack pointer) in r28, then round r1 down to a
   16-byte boundary ("rlwinm 1,1,0,0,27" keeps bits 0..27, i.e. clears
   the low 4 bits).  Because r28 carries the original stack pointer
   across the call, it appears in the clobber list of every CALL_FN_
   macro below. */
#define VALGRIND_ALIGN_STACK \
   "mr 28,1\n\t" \
   "rlwinm 1,1,0,0,27\n\t"
#define VALGRIND_RESTORE_STACK \
   "mr 1,28\n\t"

/* These CALL_FN_ macros assume that on ppc32-linux,
   sizeof(unsigned long) == 4. */

/* Common scheme for all CALL_FN_ macros in this section:
   _argvec[0] holds the target address (_orig.nraddr) and
   _argvec[1..N] hold the N word-sized arguments.  r11 is pointed at
   _argvec, the first 8 args are loaded into r3..r10 (args beyond 8
   are stored into a freshly extended stack frame), the target is
   then loaded into r11, and the call is made via
   VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 so that Valgrind's
   function redirection is NOT applied to it.  The return value
   arrives in r3 and is copied out to lval. */
#define CALL_FN_W_v(lval, orig) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[1]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_W(lval, orig, arg1) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[2]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[4]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[5]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[6]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[7]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[8]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[9]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

/* From 9 args onwards: the register args no longer suffice, so the
   frame is extended ("addi 1,1,-16" / "-32") and the overflow args
   are stored into the new frame's parameter area before the call.
   VALGRIND_RESTORE_STACK undoes the extension. */
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8,arg9) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[10]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      _argvec[9] = (unsigned long)arg9; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "addi 1,1,-16\n\t" \
         /* arg9 */ \
         "lwz 3,36(11)\n\t" \
         "stw 3,8(1)\n\t" \
         /* args1-8 */ \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[11]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      _argvec[9] = (unsigned long)arg9; \
      _argvec[10] = (unsigned long)arg10; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "addi 1,1,-16\n\t" \
         /* arg10 */ \
         "lwz 3,40(11)\n\t" \
         "stw 3,12(1)\n\t" \
         /* arg9 */ \
         "lwz 3,36(11)\n\t" \
         "stw 3,8(1)\n\t" \
         /* args1-8 */ \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10,arg11) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[12]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      _argvec[9] = (unsigned long)arg9; \
      _argvec[10] = (unsigned long)arg10; \
      _argvec[11] = (unsigned long)arg11; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "addi 1,1,-32\n\t" \
         /* arg11 */ \
         "lwz 3,44(11)\n\t" \
         "stw 3,16(1)\n\t" \
         /* arg10 */ \
         "lwz 3,40(11)\n\t" \
         "stw 3,12(1)\n\t" \
         /* arg9 */ \
         "lwz 3,36(11)\n\t" \
         "stw 3,8(1)\n\t" \
         /* args1-8 */ \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10,arg11,arg12) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[13]; \
      volatile unsigned long _res; \
      _argvec[0] = (unsigned long)_orig.nraddr; \
      _argvec[1] = (unsigned long)arg1; \
      _argvec[2] = (unsigned long)arg2; \
      _argvec[3] = (unsigned long)arg3; \
      _argvec[4] = (unsigned long)arg4; \
      _argvec[5] = (unsigned long)arg5; \
      _argvec[6] = (unsigned long)arg6; \
      _argvec[7] = (unsigned long)arg7; \
      _argvec[8] = (unsigned long)arg8; \
      _argvec[9] = (unsigned long)arg9; \
      _argvec[10] = (unsigned long)arg10; \
      _argvec[11] = (unsigned long)arg11; \
      _argvec[12] = (unsigned long)arg12; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "addi 1,1,-32\n\t" \
         /* arg12 */ \
         "lwz 3,48(11)\n\t" \
         "stw 3,20(1)\n\t" \
         /* arg11 */ \
         "lwz 3,44(11)\n\t" \
         "stw 3,16(1)\n\t" \
         /* arg10 */ \
         "lwz 3,40(11)\n\t" \
         "stw 3,12(1)\n\t" \
         /* arg9 */ \
         "lwz 3,36(11)\n\t" \
         "stw 3,8(1)\n\t" \
         /* args1-8 */ \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */ \
         "lwz 4,8(11)\n\t" \
         "lwz 5,12(11)\n\t" \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */ \
         "lwz 7,20(11)\n\t" \
         "lwz 8,24(11)\n\t" \
         "lwz 9,28(11)\n\t" \
         "lwz 10,32(11)\n\t" /* arg8->r10 */ \
         "lwz 11,0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         VALGRIND_RESTORE_STACK \
         "mr %0,3" \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[0]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#endif /* PLAT_ppc32_linux */
/* ------------------------ ppc64-linux ------------------------ */

#if defined(PLAT_ppc64be_linux)

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
   "lr", "ctr", "xer", \
   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
   "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
   "r11", "r12", "r13"

/* Macros to save and align the stack before making a function
   call and restore it afterwards as gcc may not keep the stack
   pointer aligned if it doesn't realise calls are being made
   to other functions. */

/* ALIGN: save r1 in r28, then round r1 down to a 16-byte boundary
   ("rldicr 1,1,0,59" clears the low 4 bits).  r28 carries the
   original stack pointer across the call and so is clobbered by
   every CALL_FN_ macro below. */
#define VALGRIND_ALIGN_STACK \
   "mr 28,1\n\t" \
   "rldicr 1,1,0,59\n\t"
#define VALGRIND_RESTORE_STACK \
   "mr 1,28\n\t"

/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
   long) == 8. */

/* Layout used by every macro in this section: r11 is pointed at
   &_argvec[2], so with 8-byte slots, -16(11) is _argvec[0] (scratch
   slot that receives the caller's current r2/TOC pointer),
   -8(11) is _argvec[1] (= _orig.r2, the callee's TOC pointer),
   0(11) is _argvec[2] (= _orig.nraddr, the target address), and
   8*k(11) is argument k.  The call goes through
   VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 so Valgrind's function
   redirection is NOT applied; the caller's r2 is restored from
   _argvec[0] afterwards and the result comes back in r3. */
#define CALL_FN_W_v(lval, orig) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+0]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1] = (unsigned long)_orig.r2; \
      _argvec[2] = (unsigned long)_orig.nraddr; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_W(lval, orig, arg1) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+1]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+2]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+3]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+4]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      _argvec[2+4] = (unsigned long)arg4; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+5]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      _argvec[2+4] = (unsigned long)arg4; \
      _argvec[2+5] = (unsigned long)arg5; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld   7, 40(11)\n\t" /* arg5->r7 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+6]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      _argvec[2+4] = (unsigned long)arg4; \
      _argvec[2+5] = (unsigned long)arg5; \
      _argvec[2+6] = (unsigned long)arg6; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld   7, 40(11)\n\t" /* arg5->r7 */ \
         "ld   8, 48(11)\n\t" /* arg6->r8 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+7]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      _argvec[2+4] = (unsigned long)arg4; \
      _argvec[2+5] = (unsigned long)arg5; \
      _argvec[2+6] = (unsigned long)arg6; \
      _argvec[2+7] = (unsigned long)arg7; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld   7, 40(11)\n\t" /* arg5->r7 */ \
         "ld   8, 48(11)\n\t" /* arg6->r8 */ \
         "ld   9, 56(11)\n\t" /* arg7->r9 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+8]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      _argvec[2+4] = (unsigned long)arg4; \
      _argvec[2+5] = (unsigned long)arg5; \
      _argvec[2+6] = (unsigned long)arg6; \
      _argvec[2+7] = (unsigned long)arg7; \
      _argvec[2+8] = (unsigned long)arg8; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld   7, 40(11)\n\t" /* arg5->r7 */ \
         "ld   8, 48(11)\n\t" /* arg6->r8 */ \
         "ld   9, 56(11)\n\t" /* arg7->r9 */ \
         "ld  10, 64(11)\n\t" /* arg8->r10 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

/* From 9 args onwards the stack frame is extended and the overflow
   args are stored into its parameter-save area (offsets 112, 120,
   128, 136 from the new r1) before the call. */
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                     arg7,arg8,arg9) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+9]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]   = (unsigned long)_orig.r2; \
      _argvec[2]   = (unsigned long)_orig.nraddr; \
      _argvec[2+1] = (unsigned long)arg1; \
      _argvec[2+2] = (unsigned long)arg2; \
      _argvec[2+3] = (unsigned long)arg3; \
      _argvec[2+4] = (unsigned long)arg4; \
      _argvec[2+5] = (unsigned long)arg5; \
      _argvec[2+6] = (unsigned long)arg6; \
      _argvec[2+7] = (unsigned long)arg7; \
      _argvec[2+8] = (unsigned long)arg8; \
      _argvec[2+9] = (unsigned long)arg9; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "addi 1,1,-128\n\t"  /* expand stack frame */ \
         /* arg9 */ \
         "ld  3,72(11)\n\t" \
         "std 3,112(1)\n\t" \
         /* args1-8 */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld   7, 40(11)\n\t" /* arg5->r7 */ \
         "ld   8, 48(11)\n\t" /* arg6->r8 */ \
         "ld   9, 56(11)\n\t" /* arg7->r9 */ \
         "ld  10, 64(11)\n\t" /* arg8->r10 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+10]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]    = (unsigned long)_orig.r2; \
      _argvec[2]    = (unsigned long)_orig.nraddr; \
      _argvec[2+1]  = (unsigned long)arg1; \
      _argvec[2+2]  = (unsigned long)arg2; \
      _argvec[2+3]  = (unsigned long)arg3; \
      _argvec[2+4]  = (unsigned long)arg4; \
      _argvec[2+5]  = (unsigned long)arg5; \
      _argvec[2+6]  = (unsigned long)arg6; \
      _argvec[2+7]  = (unsigned long)arg7; \
      _argvec[2+8]  = (unsigned long)arg8; \
      _argvec[2+9]  = (unsigned long)arg9; \
      _argvec[2+10] = (unsigned long)arg10; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "addi 1,1,-128\n\t"  /* expand stack frame */ \
         /* arg10 */ \
         "ld  3,80(11)\n\t" \
         "std 3,120(1)\n\t" \
         /* arg9 */ \
         "ld  3,72(11)\n\t" \
         "std 3,112(1)\n\t" \
         /* args1-8 */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld   7, 40(11)\n\t" /* arg5->r7 */ \
         "ld   8, 48(11)\n\t" /* arg6->r8 */ \
         "ld   9, 56(11)\n\t" /* arg7->r9 */ \
         "ld  10, 64(11)\n\t" /* arg8->r10 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10,arg11) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+11]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]    = (unsigned long)_orig.r2; \
      _argvec[2]    = (unsigned long)_orig.nraddr; \
      _argvec[2+1]  = (unsigned long)arg1; \
      _argvec[2+2]  = (unsigned long)arg2; \
      _argvec[2+3]  = (unsigned long)arg3; \
      _argvec[2+4]  = (unsigned long)arg4; \
      _argvec[2+5]  = (unsigned long)arg5; \
      _argvec[2+6]  = (unsigned long)arg6; \
      _argvec[2+7]  = (unsigned long)arg7; \
      _argvec[2+8]  = (unsigned long)arg8; \
      _argvec[2+9]  = (unsigned long)arg9; \
      _argvec[2+10] = (unsigned long)arg10; \
      _argvec[2+11] = (unsigned long)arg11; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "addi 1,1,-144\n\t"  /* expand stack frame */ \
         /* arg11 */ \
         "ld  3,88(11)\n\t" \
         "std 3,128(1)\n\t" \
         /* arg10 */ \
         "ld  3,80(11)\n\t" \
         "std 3,120(1)\n\t" \
         /* arg9 */ \
         "ld  3,72(11)\n\t" \
         "std 3,112(1)\n\t" \
         /* args1-8 */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld   7, 40(11)\n\t" /* arg5->r7 */ \
         "ld   8, 48(11)\n\t" /* arg6->r8 */ \
         "ld   9, 56(11)\n\t" /* arg7->r9 */ \
         "ld  10, 64(11)\n\t" /* arg8->r10 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
                      arg7,arg8,arg9,arg10,arg11,arg12) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+12]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1]    = (unsigned long)_orig.r2; \
      _argvec[2]    = (unsigned long)_orig.nraddr; \
      _argvec[2+1]  = (unsigned long)arg1; \
      _argvec[2+2]  = (unsigned long)arg2; \
      _argvec[2+3]  = (unsigned long)arg3; \
      _argvec[2+4]  = (unsigned long)arg4; \
      _argvec[2+5]  = (unsigned long)arg5; \
      _argvec[2+6]  = (unsigned long)arg6; \
      _argvec[2+7]  = (unsigned long)arg7; \
      _argvec[2+8]  = (unsigned long)arg8; \
      _argvec[2+9]  = (unsigned long)arg9; \
      _argvec[2+10] = (unsigned long)arg10; \
      _argvec[2+11] = (unsigned long)arg11; \
      _argvec[2+12] = (unsigned long)arg12; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 11,%1\n\t" \
         "std 2,-16(11)\n\t"  /* save tocptr */ \
         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */ \
         "addi 1,1,-144\n\t"  /* expand stack frame */ \
         /* arg12 */ \
         "ld  3,96(11)\n\t" \
         "std 3,136(1)\n\t" \
         /* arg11 */ \
         "ld  3,88(11)\n\t" \
         "std 3,128(1)\n\t" \
         /* arg10 */ \
         "ld  3,80(11)\n\t" \
         "std 3,120(1)\n\t" \
         /* arg9 */ \
         "ld  3,72(11)\n\t" \
         "std 3,112(1)\n\t" \
         /* args1-8 */ \
         "ld   3, 8(11)\n\t"  /* arg1->r3 */ \
         "ld   4, 16(11)\n\t" /* arg2->r4 */ \
         "ld   5, 24(11)\n\t" /* arg3->r5 */ \
         "ld   6, 32(11)\n\t" /* arg4->r6 */ \
         "ld   7, 40(11)\n\t" /* arg5->r7 */ \
         "ld   8, 48(11)\n\t" /* arg6->r8 */ \
         "ld   9, 56(11)\n\t" /* arg7->r9 */ \
         "ld  10, 64(11)\n\t" /* arg8->r10 */ \
         "ld  11, 0(11)\n\t"  /* target->r11 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
         "mr 11,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(11)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)

#endif /* PLAT_ppc64be_linux */
/* ------------------------- ppc64le-linux ----------------------- */

#if defined(PLAT_ppc64le_linux)

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
   "lr", "ctr", "xer", \
   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
   "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
   "r11", "r12", "r13"

/* Macros to save and align the stack before making a function
   call and restore it afterwards as gcc may not keep the stack
   pointer aligned if it doesn't realise calls are being made
   to other functions. */

/* ALIGN: save r1 in r28, then round r1 down to a 16-byte boundary
   ("rldicr 1,1,0,59" clears the low 4 bits); hence "r28" in the
   clobber list of every CALL_FN_ macro in this section. */
#define VALGRIND_ALIGN_STACK \
   "mr 28,1\n\t" \
   "rldicr 1,1,0,59\n\t"
#define VALGRIND_RESTORE_STACK \
   "mr 1,28\n\t"

/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
   long) == 8. */

/* Same _argvec layout as the ppc64be section above (r12 points at
   &_argvec[2]; -16/-8/0 are r2-save slot, callee tocptr, target),
   but the target address travels in r12 and the call goes through
   VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 — presumably to satisfy
   the ELFv2 convention that r12 holds the function's entry address
   (TODO confirm against the ppc64le ABI). */
#define CALL_FN_W_v(lval, orig) \
   do { \
      volatile OrigFn        _orig = (orig); \
      volatile unsigned long _argvec[3+0]; \
      volatile unsigned long _res; \
      /* _argvec[0] holds current r2 across the call */ \
      _argvec[1] = (unsigned long)_orig.r2; \
      _argvec[2] = (unsigned long)_orig.nraddr; \
      __asm__ volatile( \
         VALGRIND_ALIGN_STACK \
         "mr 12,%1\n\t" \
         "std 2,-16(12)\n\t"  /* save tocptr */ \
         "ld   2,-8(12)\n\t"  /* use nraddr's tocptr */ \
         "ld  12, 0(12)\n\t"  /* target->r12 */ \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
         "mr 12,%1\n\t" \
         "mr %0,3\n\t" \
         "ld 2,-16(12)\n\t"   /* restore tocptr */ \
         VALGRIND_RESTORE_STACK \
         : /*out*/   "=r" (_res) \
         : /*in*/    "r" (&_argvec[2]) \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
      ); \
      lval = (__typeof__(lval)) _res; \
   } while (0)
- #define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 7, 40(12)\n\t" /* arg5->r7 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 7, 40(12)\n\t" /* arg5->r7 */ \
- "ld 8, 48(12)\n\t" /* arg6->r8 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 7, 40(12)\n\t" /* arg5->r7 */ \
- "ld 8, 48(12)\n\t" /* arg6->r8 */ \
- "ld 9, 56(12)\n\t" /* arg7->r9 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 7, 40(12)\n\t" /* arg5->r7 */ \
- "ld 8, 48(12)\n\t" /* arg6->r8 */ \
- "ld 9, 56(12)\n\t" /* arg7->r9 */ \
- "ld 10, 64(12)\n\t" /* arg8->r10 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-128\n\t" /* expand stack frame */ \
- /* arg9 */ \
- "ld 3,72(12)\n\t" \
- "std 3,96(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 7, 40(12)\n\t" /* arg5->r7 */ \
- "ld 8, 48(12)\n\t" /* arg6->r8 */ \
- "ld 9, 56(12)\n\t" /* arg7->r9 */ \
- "ld 10, 64(12)\n\t" /* arg8->r10 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-128\n\t" /* expand stack frame */ \
- /* arg10 */ \
- "ld 3,80(12)\n\t" \
- "std 3,104(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(12)\n\t" \
- "std 3,96(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 7, 40(12)\n\t" /* arg5->r7 */ \
- "ld 8, 48(12)\n\t" /* arg6->r8 */ \
- "ld 9, 56(12)\n\t" /* arg7->r9 */ \
- "ld 10, 64(12)\n\t" /* arg8->r10 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-144\n\t" /* expand stack frame */ \
- /* arg11 */ \
- "ld 3,88(12)\n\t" \
- "std 3,112(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(12)\n\t" \
- "std 3,104(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(12)\n\t" \
- "std 3,96(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 7, 40(12)\n\t" /* arg5->r7 */ \
- "ld 8, 48(12)\n\t" /* arg6->r8 */ \
- "ld 9, 56(12)\n\t" /* arg7->r9 */ \
- "ld 10, 64(12)\n\t" /* arg8->r10 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "mr 12,%1\n\t" \
- "std 2,-16(12)\n\t" /* save tocptr */ \
- "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
- "addi 1,1,-144\n\t" /* expand stack frame */ \
- /* arg12 */ \
- "ld 3,96(12)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg11 */ \
- "ld 3,88(12)\n\t" \
- "std 3,112(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(12)\n\t" \
- "std 3,104(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(12)\n\t" \
- "std 3,96(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(12)\n\t" /* arg1->r3 */ \
- "ld 4, 16(12)\n\t" /* arg2->r4 */ \
- "ld 5, 24(12)\n\t" /* arg3->r5 */ \
- "ld 6, 32(12)\n\t" /* arg4->r6 */ \
- "ld 7, 40(12)\n\t" /* arg5->r7 */ \
- "ld 8, 48(12)\n\t" /* arg6->r8 */ \
- "ld 9, 56(12)\n\t" /* arg7->r9 */ \
- "ld 10, 64(12)\n\t" /* arg8->r10 */ \
- "ld 12, 0(12)\n\t" /* target->r12 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
- "mr 12,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(12)\n\t" /* restore tocptr */ \
- VALGRIND_RESTORE_STACK \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #endif /* PLAT_ppc64le_linux */
/* ------------------------- arm-linux ------------------------- */

#if defined(PLAT_arm_linux)

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14"

/* Macros to save and align the stack before making a function
   call and restore it afterwards as gcc may not keep the stack
   pointer aligned if it doesn't realise calls are being made
   to other functions. */

/* This is a bit tricky.  We store the original stack pointer in r10
   as it is callee-saves.  gcc doesn't allow the use of r11 for some
   reason.  Also, we can't directly "bic" the stack pointer in thumb
   mode since r13 isn't an allowed register number in that context.
   So use r4 as a temporary, since that is about to get trashed
   anyway, just after each use of this macro.  Side effect is we need
   to be very careful about any future changes, since
   VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
#define VALGRIND_ALIGN_STACK \
   "mov r10, sp\n\t"         \
   "mov r4,  sp\n\t"         \
   "bic r4,  r4, #7\n\t"     \
   "mov sp,  r4\n\t"
#define VALGRIND_RESTORE_STACK \
   "mov sp,  r10\n\t"

/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
   long) == 4. */

#define CALL_FN_W_v(lval, orig)                                   \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[1];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0\n"                                           \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_W(lval, orig, arg1)                             \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[2];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0\n"                                           \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[3];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0\n"                                           \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[4];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0\n"                                           \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[5];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[6];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "sub sp, sp, #4 \n\t"                                    \
         "ldr r0, [%1, #20] \n\t"                                 \
         "push {r0} \n\t"                                         \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[7];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r0, [%1, #20] \n\t"                                 \
         "ldr r1, [%1, #24] \n\t"                                 \
         "push {r0, r1} \n\t"                                     \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7)                                        \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[8];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "sub sp, sp, #4 \n\t"                                    \
         "ldr r0, [%1, #20] \n\t"                                 \
         "ldr r1, [%1, #24] \n\t"                                 \
         "ldr r2, [%1, #28] \n\t"                                 \
         "push {r0, r1, r2} \n\t"                                 \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8)                                   \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[9];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r0, [%1, #20] \n\t"                                 \
         "ldr r1, [%1, #24] \n\t"                                 \
         "ldr r2, [%1, #28] \n\t"                                 \
         "ldr r3, [%1, #32] \n\t"                                 \
         "push {r0, r1, r2, r3} \n\t"                             \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8,arg9)                              \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[10];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "sub sp, sp, #4 \n\t"                                    \
         "ldr r0, [%1, #20] \n\t"                                 \
         "ldr r1, [%1, #24] \n\t"                                 \
         "ldr r2, [%1, #28] \n\t"                                 \
         "ldr r3, [%1, #32] \n\t"                                 \
         "ldr r4, [%1, #36] \n\t"                                 \
         "push {r0, r1, r2, r3, r4} \n\t"                         \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
                      arg7,arg8,arg9,arg10)                       \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[11];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r0, [%1, #40] \n\t"                                 \
         "push {r0} \n\t"                                         \
         "ldr r0, [%1, #20] \n\t"                                 \
         "ldr r1, [%1, #24] \n\t"                                 \
         "ldr r2, [%1, #28] \n\t"                                 \
         "ldr r3, [%1, #32] \n\t"                                 \
         "ldr r4, [%1, #36] \n\t"                                 \
         "push {r0, r1, r2, r3, r4} \n\t"                         \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
                      arg6,arg7,arg8,arg9,arg10,                  \
                      arg11)                                      \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[12];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      _argvec[11] = (unsigned long)(arg11);                       \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "sub sp, sp, #4 \n\t"                                    \
         "ldr r0, [%1, #40] \n\t"                                 \
         "ldr r1, [%1, #44] \n\t"                                 \
         "push {r0, r1} \n\t"                                     \
         "ldr r0, [%1, #20] \n\t"                                 \
         "ldr r1, [%1, #24] \n\t"                                 \
         "ldr r2, [%1, #28] \n\t"                                 \
         "ldr r3, [%1, #32] \n\t"                                 \
         "ldr r4, [%1, #36] \n\t"                                 \
         "push {r0, r1, r2, r3, r4} \n\t"                         \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
                      arg6,arg7,arg8,arg9,arg10,                  \
                      arg11,arg12)                                \
   do {                                                           \
      volatile OrigFn _orig = (orig);                             \
      volatile unsigned long _argvec[13];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      _argvec[11] = (unsigned long)(arg11);                       \
      _argvec[12] = (unsigned long)(arg12);                       \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "ldr r0, [%1, #40] \n\t"                                 \
         "ldr r1, [%1, #44] \n\t"                                 \
         "ldr r2, [%1, #48] \n\t"                                 \
         "push {r0, r1, r2} \n\t"                                 \
         "ldr r0, [%1, #20] \n\t"                                 \
         "ldr r1, [%1, #24] \n\t"                                 \
         "ldr r2, [%1, #28] \n\t"                                 \
         "ldr r3, [%1, #32] \n\t"                                 \
         "ldr r4, [%1, #36] \n\t"                                 \
         "push {r0, r1, r2, r3, r4} \n\t"                         \
         "ldr r0, [%1, #4] \n\t"                                  \
         "ldr r1, [%1, #8] \n\t"                                  \
         "ldr r2, [%1, #12] \n\t"                                 \
         "ldr r3, [%1, #16] \n\t"                                 \
         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
         VALGRIND_RESTORE_STACK                                   \
         "mov %0, r0"                                             \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* PLAT_arm_linux */
- /* ------------------------ arm64-linux ------------------------ */
- #if defined(PLAT_arm64_linux)
- /* These regs are trashed by the hidden call. */
- #define __CALLER_SAVED_REGS \
- "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \
- "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \
- "x18", "x19", "x20", "x30", \
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \
- "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \
- "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \
- "v26", "v27", "v28", "v29", "v30", "v31"
- /* x21 is callee-saved, so we can use it to save and restore SP around
- the hidden call. */
- #define VALGRIND_ALIGN_STACK \
- "mov x21, sp\n\t" \
- "bic sp, x21, #15\n\t"
- #define VALGRIND_RESTORE_STACK \
- "mov sp, x21\n\t"
- /* These CALL_FN_ macros assume that on arm64-linux,
- sizeof(unsigned long) == 8. */
- #define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x4, [%1, #40] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x4, [%1, #40] \n\t" \
- "ldr x5, [%1, #48] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x4, [%1, #40] \n\t" \
- "ldr x5, [%1, #48] \n\t" \
- "ldr x6, [%1, #56] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x4, [%1, #40] \n\t" \
- "ldr x5, [%1, #48] \n\t" \
- "ldr x6, [%1, #56] \n\t" \
- "ldr x7, [%1, #64] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "sub sp, sp, #0x20 \n\t" \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x4, [%1, #40] \n\t" \
- "ldr x5, [%1, #48] \n\t" \
- "ldr x6, [%1, #56] \n\t" \
- "ldr x7, [%1, #64] \n\t" \
- "ldr x8, [%1, #72] \n\t" \
- "str x8, [sp, #0] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "sub sp, sp, #0x20 \n\t" \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x4, [%1, #40] \n\t" \
- "ldr x5, [%1, #48] \n\t" \
- "ldr x6, [%1, #56] \n\t" \
- "ldr x7, [%1, #64] \n\t" \
- "ldr x8, [%1, #72] \n\t" \
- "str x8, [sp, #0] \n\t" \
- "ldr x8, [%1, #80] \n\t" \
- "str x8, [sp, #8] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "sub sp, sp, #0x30 \n\t" \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x4, [%1, #40] \n\t" \
- "ldr x5, [%1, #48] \n\t" \
- "ldr x6, [%1, #56] \n\t" \
- "ldr x7, [%1, #64] \n\t" \
- "ldr x8, [%1, #72] \n\t" \
- "str x8, [sp, #0] \n\t" \
- "ldr x8, [%1, #80] \n\t" \
- "str x8, [sp, #8] \n\t" \
- "ldr x8, [%1, #88] \n\t" \
- "str x8, [sp, #16] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11, \
- arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- VALGRIND_ALIGN_STACK \
- "sub sp, sp, #0x30 \n\t" \
- "ldr x0, [%1, #8] \n\t" \
- "ldr x1, [%1, #16] \n\t" \
- "ldr x2, [%1, #24] \n\t" \
- "ldr x3, [%1, #32] \n\t" \
- "ldr x4, [%1, #40] \n\t" \
- "ldr x5, [%1, #48] \n\t" \
- "ldr x6, [%1, #56] \n\t" \
- "ldr x7, [%1, #64] \n\t" \
- "ldr x8, [%1, #72] \n\t" \
- "str x8, [sp, #0] \n\t" \
- "ldr x8, [%1, #80] \n\t" \
- "str x8, [sp, #8] \n\t" \
- "ldr x8, [%1, #88] \n\t" \
- "str x8, [sp, #16] \n\t" \
- "ldr x8, [%1, #96] \n\t" \
- "str x8, [sp, #24] \n\t" \
- "ldr x8, [%1] \n\t" /* target->x8 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
- VALGRIND_RESTORE_STACK \
- "mov %0, x0" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #endif /* PLAT_arm64_linux */
- /* ------------------------- s390x-linux ------------------------- */
- #if defined(PLAT_s390x_linux)
- /* Similar workaround as amd64 (see above), but we use r11 as frame
- pointer and save the old r11 in r7. r11 might be used for
- argvec, therefore we copy argvec in r1 since r1 is clobbered
- after the call anyway. */
- #if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
- # define __FRAME_POINTER \
- ,"d"(__builtin_dwarf_cfa())
- # define VALGRIND_CFI_PROLOGUE \
- ".cfi_remember_state\n\t" \
- "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
- "lgr 7,11\n\t" \
- "lgr 11,%2\n\t" \
- ".cfi_def_cfa r11, 0\n\t"
- # define VALGRIND_CFI_EPILOGUE \
- "lgr 11, 7\n\t" \
- ".cfi_restore_state\n\t"
- #else
- # define __FRAME_POINTER
- # define VALGRIND_CFI_PROLOGUE \
- "lgr 1,%1\n\t"
- # define VALGRIND_CFI_EPILOGUE
- #endif
- /* Nb: On s390 the stack pointer is properly aligned *at all times*
- according to the s390 GCC maintainer. (The ABI specification is not
- precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and
- VALGRIND_RESTORE_STACK are not defined here. */
- /* These regs are trashed by the hidden call. Note that we overwrite
- r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
- function a proper return address. All others are ABI defined call
- clobbers. */
- #define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
- "f0","f1","f2","f3","f4","f5","f6","f7"
- /* Nb: Although r11 is modified in the asm snippets below (inside
- VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
- two reasons:
- (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
- modified
- (2) GCC will complain that r11 cannot appear inside a clobber section,
- when compiled with -O -fno-omit-frame-pointer
- */
- #define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 1, 0(1)\n\t" /* target->r1 */ \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- /* The call abi has the arguments in r2-r6 and stack */
- #define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WW(lval, orig, arg1, arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-160\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,160\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-168\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,168\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-176\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,176\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-184\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,184\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-192\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,192\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-200\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,200\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10, arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-208\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "mvc 200(8,15), 88(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,208\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- _argvec[12] = (unsigned long)arg12; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-216\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "mvc 200(8,15), 88(1)\n\t" \
- "mvc 208(8,15), 96(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,216\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #endif /* PLAT_s390x_linux */
- /* ------------------------- mips32-linux ----------------------- */
-
- #if defined(PLAT_mips32_linux)
- /* These regs are trashed by the hidden call. */
- #define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
- "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "$25", "$31"
- /* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
- long) == 4. */
- #define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16\n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $4, 4(%1) \n\t" /* arg1*/ \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "subu $29, $29, 16 \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 16 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 24\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 24 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 32\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "nop\n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 32 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 32\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 32 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 40\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 40 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 40\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 36(%1) \n\t" \
- "sw $4, 32($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 40 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 48\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 36(%1) \n\t" \
- "sw $4, 32($29) \n\t" \
- "lw $4, 40(%1) \n\t" \
- "sw $4, 36($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 48 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
- #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- "subu $29, $29, 8 \n\t" \
- "sw $28, 0($29) \n\t" \
- "sw $31, 4($29) \n\t" \
- "lw $4, 20(%1) \n\t" \
- "subu $29, $29, 48\n\t" \
- "sw $4, 16($29) \n\t" \
- "lw $4, 24(%1) \n\t" \
- "sw $4, 20($29) \n\t" \
- "lw $4, 28(%1) \n\t" \
- "sw $4, 24($29) \n\t" \
- "lw $4, 32(%1) \n\t" \
- "sw $4, 28($29) \n\t" \
- "lw $4, 36(%1) \n\t" \
- "sw $4, 32($29) \n\t" \
- "lw $4, 40(%1) \n\t" \
- "sw $4, 36($29) \n\t" \
- "lw $4, 44(%1) \n\t" \
- "sw $4, 40($29) \n\t" \
- "lw $4, 4(%1) \n\t" \
- "lw $5, 8(%1) \n\t" \
- "lw $6, 12(%1) \n\t" \
- "lw $7, 16(%1) \n\t" \
- "lw $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "addu $29, $29, 48 \n\t" \
- "lw $28, 0($29) \n\t" \
- "lw $31, 4($29) \n\t" \
- "addu $29, $29, 8 \n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
/* 12-argument variant: args 1-4 go in $4-$7 (a0-a3); args 5-12 are
   copied, via $4 as scratch, into a 56-byte stack block below the
   gp/ra save slots.  Same save/restore protocol as CALL_FN_W_v.
   NOTE(review): this macro's input constraint is "r" while every
   other mips32 macro in this family uses "0" (tying %1 to the
   output register) -- both appear workable since %1 is dead before
   "move %0, $2", but confirm the divergence is intentional.  */
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
                      arg6,arg7,arg8,arg9,arg10,                  \
                      arg11,arg12)                                \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[13];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      _argvec[11] = (unsigned long)(arg11);                       \
      _argvec[12] = (unsigned long)(arg12);                       \
      __asm__ volatile(                                           \
         "subu $29, $29, 8 \n\t"   /* save gp/ra */               \
         "sw $28, 0($29) \n\t"                                    \
         "sw $31, 4($29) \n\t"                                    \
         "lw $4, 20(%1) \n\t"      /* arg5 */                     \
         "subu $29, $29, 56\n\t"   /* stack args + arg area */    \
         "sw $4, 16($29) \n\t"                                    \
         "lw $4, 24(%1) \n\t"      /* arg6 */                     \
         "sw $4, 20($29) \n\t"                                    \
         "lw $4, 28(%1) \n\t"      /* arg7 */                     \
         "sw $4, 24($29) \n\t"                                    \
         "lw $4, 32(%1) \n\t"      /* arg8 */                     \
         "sw $4, 28($29) \n\t"                                    \
         "lw $4, 36(%1) \n\t"      /* arg9 */                     \
         "sw $4, 32($29) \n\t"                                    \
         "lw $4, 40(%1) \n\t"      /* arg10 */                    \
         "sw $4, 36($29) \n\t"                                    \
         "lw $4, 44(%1) \n\t"      /* arg11 */                    \
         "sw $4, 40($29) \n\t"                                    \
         "lw $4, 48(%1) \n\t"      /* arg12 */                    \
         "sw $4, 44($29) \n\t"                                    \
         "lw $4, 4(%1) \n\t"       /* arg1 -> a0 */               \
         "lw $5, 8(%1) \n\t"       /* arg2 -> a1 */               \
         "lw $6, 12(%1) \n\t"      /* arg3 -> a2 */               \
         "lw $7, 16(%1) \n\t"      /* arg4 -> a3 */               \
         "lw $25, 0(%1) \n\t"  /* target->t9 */                   \
         VALGRIND_CALL_NOREDIR_T9                                 \
         "addu $29, $29, 56 \n\t"                                 \
         "lw $28, 0($29) \n\t"     /* restore gp */               \
         "lw $31, 4($29) \n\t"     /* restore ra */               \
         "addu $29, $29, 8 \n\t"                                  \
         "move %0, $2\n"           /* result from v0 */           \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "r" (&_argvec[0])                               \
         : /*trash*/ "memory", __CALLER_SAVED_REGS                \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)
- #endif /* PLAT_mips32_linux */
- /* ------------------------- mips64-linux ------------------------- */
- #if defined(PLAT_mips64_linux)
- /* These regs are trashed by the hidden call. */
- #define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
- "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "$25", "$31"
- /* These CALL_FN_ macros assume that on mips64-linux,
- sizeof(long long) == 8. */
- #define MIPS64_LONG2REG_CAST(x) ((long long)(long)x)
/* mips64/n64 zero-arg call: no stack juggling needed (gp/ra handling
   differs from o32), just load the target into $25 (t9) and call
   through the NOREDIR sequence; result comes back in $2 (v0).
   NOTE(review): this macro uses the "0" input constraint whereas
   every other mips64 macro in the family uses "r" -- both tie the
   argvec pointer into a register; confirm the mismatch is benign.  */
#define CALL_FN_W_v(lval, orig)                                   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long long _argvec[1];                     \
      volatile unsigned long long _res;                           \
      _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr);            \
      __asm__ volatile(                                           \
         "ld $25, 0(%1)\n\t"  /* target->t9 */                    \
         VALGRIND_CALL_NOREDIR_T9                                 \
         "move %0, $2\n"      /* result from v0 */                \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "0" (&_argvec[0])                               \
         : /*trash*/ "memory", __CALLER_SAVED_REGS                \
      );                                                          \
      lval = (__typeof__(lval)) (long)_res;                       \
   } while (0)
- #define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[2]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" /* arg1*/ \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
/* mips64/n64 two-arg call: args go in $4/$5 (a0/a1), target in $25
   (t9), result in $2 (v0).  See CALL_FN_W_v for the protocol.  */
#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long long _argvec[3];                     \
      volatile unsigned long long _res;                           \
      /* Cast nraddr like every sibling macro does (the bare       \
         assignment here was inconsistent with the rest of the     \
         family, which uses MIPS64_LONG2REG_CAST throughout). */   \
      _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr);            \
      _argvec[1] = MIPS64_LONG2REG_CAST(arg1);                    \
      _argvec[2] = MIPS64_LONG2REG_CAST(arg2);                    \
      __asm__ volatile(                                           \
         "ld $4, 8(%1)\n\t"   /* arg1 -> a0 */                    \
         "ld $5, 16(%1)\n\t"  /* arg2 -> a1 */                    \
         "ld $25, 0(%1)\n\t"  /* target->t9 */                    \
         VALGRIND_CALL_NOREDIR_T9                                 \
         "move %0, $2\n"      /* result from v0 */                \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "r" (&_argvec[0])                               \
         : /*trash*/ "memory", __CALLER_SAVED_REGS                \
      );                                                          \
      lval = (__typeof__(lval)) (long)_res;                       \
   } while (0)
/* mips64/n64 three-arg call: args go in $4-$6 (a0-a2), target in
   $25 (t9), result in $2 (v0).  See CALL_FN_W_v for the protocol.  */
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long long _argvec[4];                     \
      volatile unsigned long long _res;                           \
      /* Cast nraddr like every sibling macro does (the bare       \
         assignment here was inconsistent with the rest of the     \
         family, which uses MIPS64_LONG2REG_CAST throughout). */   \
      _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr);            \
      _argvec[1] = MIPS64_LONG2REG_CAST(arg1);                    \
      _argvec[2] = MIPS64_LONG2REG_CAST(arg2);                    \
      _argvec[3] = MIPS64_LONG2REG_CAST(arg3);                    \
      __asm__ volatile(                                           \
         "ld $4, 8(%1)\n\t"   /* arg1 -> a0 */                    \
         "ld $5, 16(%1)\n\t"  /* arg2 -> a1 */                    \
         "ld $6, 24(%1)\n\t"  /* arg3 -> a2 */                    \
         "ld $25, 0(%1)\n\t"  /* target->t9 */                    \
         VALGRIND_CALL_NOREDIR_T9                                 \
         "move %0, $2\n"      /* result from v0 */                \
         : /*out*/ "=r" (_res)                                    \
         : /*in*/ "r" (&_argvec[0])                               \
         : /*trash*/ "memory", __CALLER_SAVED_REGS                \
      );                                                          \
      lval = (__typeof__(lval)) (long)_res;                       \
   } while (0)
- #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[5]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[6]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[7]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
- _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[8]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
- _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
- _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[9]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
- _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
- _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
- _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
- __asm__ volatile( \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1) \n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[10]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
- _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
- _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
- _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
- _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
- __asm__ volatile( \
- "dsubu $29, $29, 8\n\t" \
- "ld $4, 72(%1)\n\t" \
- "sd $4, 0($29)\n\t" \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "daddu $29, $29, 8\n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[11]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
- _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
- _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
- _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
- _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
- _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
- __asm__ volatile( \
- "dsubu $29, $29, 16\n\t" \
- "ld $4, 72(%1)\n\t" \
- "sd $4, 0($29)\n\t" \
- "ld $4, 80(%1)\n\t" \
- "sd $4, 8($29)\n\t" \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "daddu $29, $29, 16\n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[12]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
- _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
- _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
- _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
- _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
- _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
- _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \
- __asm__ volatile( \
- "dsubu $29, $29, 24\n\t" \
- "ld $4, 72(%1)\n\t" \
- "sd $4, 0($29)\n\t" \
- "ld $4, 80(%1)\n\t" \
- "sd $4, 8($29)\n\t" \
- "ld $4, 88(%1)\n\t" \
- "sd $4, 16($29)\n\t" \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "daddu $29, $29, 24\n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
- arg6,arg7,arg8,arg9,arg10, \
- arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long long _argvec[13]; \
- volatile unsigned long long _res; \
- _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
- _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
- _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
- _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
- _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
- _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
- _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
- _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
- _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
- _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
- _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
- _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \
- _argvec[12] = MIPS64_LONG2REG_CAST(arg12); \
- __asm__ volatile( \
- "dsubu $29, $29, 32\n\t" \
- "ld $4, 72(%1)\n\t" \
- "sd $4, 0($29)\n\t" \
- "ld $4, 80(%1)\n\t" \
- "sd $4, 8($29)\n\t" \
- "ld $4, 88(%1)\n\t" \
- "sd $4, 16($29)\n\t" \
- "ld $4, 96(%1)\n\t" \
- "sd $4, 24($29)\n\t" \
- "ld $4, 8(%1)\n\t" \
- "ld $5, 16(%1)\n\t" \
- "ld $6, 24(%1)\n\t" \
- "ld $7, 32(%1)\n\t" \
- "ld $8, 40(%1)\n\t" \
- "ld $9, 48(%1)\n\t" \
- "ld $10, 56(%1)\n\t" \
- "ld $11, 64(%1)\n\t" \
- "ld $25, 0(%1)\n\t" /* target->t9 */ \
- VALGRIND_CALL_NOREDIR_T9 \
- "daddu $29, $29, 32\n\t" \
- "move %0, $2\n" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) (long)_res; \
- } while (0)
- #endif /* PLAT_mips64_linux */
- /* ------------------------------------------------------------------ */
- /* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
- /* */
- /* ------------------------------------------------------------------ */
- /* Some request codes. There are many more of these, but most are not
- exposed to end-user view. These are the public ones, all of the
- form 0x1000 + small_number.
- Core ones are in the range 0x00000000--0x0000ffff. The non-public
- ones start at 0x2000.
- */
- /* These macros are used by tools -- they must be public, but don't
- embed them into other programs. */
/* Build a tool's request-code base: packs two identifying ASCII
   chars into the top 16 bits -- (a) into bits 31..24, (b) into
   bits 23..16.
   NOTE(review): ((a)&0xff) << 24 shifts into the sign bit of a
   32-bit int before the cast; consider casting to unsigned first
   if (a) can exceed 0x7f -- confirm against callers.  */
#define VG_USERREQ_TOOL_BASE(a,b) \
   ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
/* True iff request value (v) belongs to the tool identified by
   (a,b); only the top 16 bits of (v) are compared.  */
#define VG_IS_TOOL_USERREQ(a, b, v) \
   (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
- /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
- This enum comprises an ABI exported by Valgrind to programs
- which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE
- ENTRIES, NOR DELETE ANY -- add new ones at the end of the most
- relevant group. */
- typedef
- enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
- VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
- /* These allow any function to be called from the simulated
- CPU but run on the real CPU. Nb: the first arg passed to
- the function is always the ThreadId of the running
- thread! So CLIENT_CALL0 actually requires a 1 arg
- function, etc. */
- VG_USERREQ__CLIENT_CALL0 = 0x1101,
- VG_USERREQ__CLIENT_CALL1 = 0x1102,
- VG_USERREQ__CLIENT_CALL2 = 0x1103,
- VG_USERREQ__CLIENT_CALL3 = 0x1104,
- /* Can be useful in regression testing suites -- eg. can
- send Valgrind's output to /dev/null and still count
- errors. */
- VG_USERREQ__COUNT_ERRORS = 0x1201,
- /* Allows the client program and/or gdbserver to execute a monitor
- command. */
- VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
- /* These are useful and can be interpreted by any tool that
- tracks malloc() et al, by using vg_replace_malloc.c. */
- VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
- VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
- VG_USERREQ__FREELIKE_BLOCK = 0x1302,
- /* Memory pool support. */
- VG_USERREQ__CREATE_MEMPOOL = 0x1303,
- VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
- VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
- VG_USERREQ__MEMPOOL_FREE = 0x1306,
- VG_USERREQ__MEMPOOL_TRIM = 0x1307,
- VG_USERREQ__MOVE_MEMPOOL = 0x1308,
- VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
- VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
- /* Allow printfs to valgrind log. */
- /* The first two pass the va_list argument by value, which
- assumes it is the same size as or smaller than a UWord,
- which generally isn't the case. Hence are deprecated.
- The second two pass the vargs by reference and so are
- immune to this problem. */
- /* both :: char* fmt, va_list vargs (DEPRECATED) */
- VG_USERREQ__PRINTF = 0x1401,
- VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
- /* both :: char* fmt, va_list* vargs */
- VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
- /* Stack support. */
- VG_USERREQ__STACK_REGISTER = 0x1501,
- VG_USERREQ__STACK_DEREGISTER = 0x1502,
- VG_USERREQ__STACK_CHANGE = 0x1503,
- /* Wine support */
- VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
- /* Querying of debug info. */
- VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701,
- /* Disable/enable error reporting level. Takes a single
- Word arg which is the delta to this thread's error
- disablement indicator. Hence 1 disables or further
- disables errors, and -1 moves back towards enablement.
- Other values are not allowed. */
- VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801,
- /* Some requests used for Valgrind internal, such as
- self-test or self-hosting. */
- /* Initialise IR injection */
- VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901,
- /* Used by Inner Valgrind to inform Outer Valgrind where to
- find the list of inner guest threads */
- VG_USERREQ__INNER_THREADS = 0x1902
- } Vg_ClientRequest;
- #if !defined(__GNUC__)
- # define __extension__ /* */
- #endif
- /* Returns the number of Valgrinds this code is running under. That
- is, 0 if running natively, 1 if running under Valgrind, 2 if
- running under Valgrind which is running under another Valgrind,
- etc. */
#define RUNNING_ON_VALGRIND                                        \
    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */,      \
                                    VG_USERREQ__RUNNING_ON_VALGRIND, \
                                    0, 0, 0, 0, 0)
/* Deliberately no trailing '\' after the last line above: the
   original had a stray continuation there, which spliced the
   following source line into this macro definition.  That was
   harmless only because a comment happened to follow, and would
   silently corrupt the macro if anything else were placed there. */
- /* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
- _qzz_len - 1]. Useful if you are debugging a JITter or some such,
- since it provides a way to make sure valgrind will retranslate the
- invalidated area. Returns no value. */
/* Statement-form client request (no result): asks Valgrind to drop
   any cached translations covering [_qzz_addr, _qzz_addr+_qzz_len). */
#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len)              \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS,   \
                                   _qzz_addr, _qzz_len, 0, 0, 0)
/* Statement-form client request (no result): used by an inner
   Valgrind to tell the outer Valgrind where the inner guest's
   thread list lives (see VG_USERREQ__INNER_THREADS). */
#define VALGRIND_INNER_THREADS(_qzz_addr)                              \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS,          \
                                   _qzz_addr, 0, 0, 0, 0)
- /* These requests are for getting Valgrind itself to print something.
- Possibly with a backtrace. This is a really ugly hack. The return value
- is the number of characters printed, excluding the "**<pid>** " part at the
- start and the backtrace (if present). */
- #if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
- /* Modern GCC will optimize the static routine out if unused,
- and unused attribute will shut down warnings about it. */
- static int VALGRIND_PRINTF(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
- #endif
- static int
- #if defined(_MSC_VER)
- __inline
- #endif
- VALGRIND_PRINTF(const char *format, ...)
- {
- #if defined(NVALGRIND)
- (void)format;
- return 0;
- #else /* NVALGRIND */
- #if defined(_MSC_VER) || defined(__MINGW64__)
- uintptr_t _qzz_res;
- #else
- unsigned long _qzz_res;
- #endif
- va_list vargs;
- va_start(vargs, format);
- #if defined(_MSC_VER) || defined(__MINGW64__)
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_VALIST_BY_REF,
- (uintptr_t)format,
- (uintptr_t)&vargs,
- 0, 0, 0);
- #else
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_VALIST_BY_REF,
- (unsigned long)format,
- (unsigned long)&vargs,
- 0, 0, 0);
- #endif
- va_end(vargs);
- return (int)_qzz_res;
- #endif /* NVALGRIND */
- }
- #if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
- static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
- #endif
- static int
- #if defined(_MSC_VER)
- __inline
- #endif
- VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- {
- #if defined(NVALGRIND)
- (void)format;
- return 0;
- #else /* NVALGRIND */
- #if defined(_MSC_VER) || defined(__MINGW64__)
- uintptr_t _qzz_res;
- #else
- unsigned long _qzz_res;
- #endif
- va_list vargs;
- va_start(vargs, format);
- #if defined(_MSC_VER) || defined(__MINGW64__)
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
- (uintptr_t)format,
- (uintptr_t)&vargs,
- 0, 0, 0);
- #else
- _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
- VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
- (unsigned long)format,
- (unsigned long)&vargs,
- 0, 0, 0);
- #endif
- va_end(vargs);
- return (int)_qzz_res;
- #endif /* NVALGRIND */
- }
- /* These requests allow control to move from the simulated CPU to the
- real CPU, calling an arbitrary function.
-
- Note that the current ThreadId is inserted as the first argument.
- So this call:
- VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
- requires f to have this signature:
- Word f(Word tid, Word arg1, Word arg2)
- where "Word" is a word-sized type.
- Note that these client requests are not entirely reliable. For example,
- if you call a function with them that subsequently calls printf(),
- there's a high chance Valgrind will crash. Generally, your prospects of
- these working are made higher if the called function does not refer to
- any global variables, and does not refer to any libc or other functions
- (printf et al). Any kind of entanglement with libc or dynamic linking is
- likely to have a bad outcome, for tricky reasons which we've grappled
- with a lot in the past.
- */
- #define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL0, \
- _qyy_fn, \
- 0, 0, 0, 0)
- #define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL1, \
- _qyy_fn, \
- _qyy_arg1, 0, 0, 0)
- #define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL2, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, 0, 0)
- #define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
- VG_USERREQ__CLIENT_CALL3, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, \
- _qyy_arg3, 0)
- /* Counts the number of errors that have been recorded by a tool. Nb:
- the tool must record the errors with VG_(maybe_record_error)() or
- VG_(unique_error)() for them to be counted. */
- #define VALGRIND_COUNT_ERRORS \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
- 0 /* default return */, \
- VG_USERREQ__COUNT_ERRORS, \
- 0, 0, 0, 0, 0)
- /* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
- when heap blocks are allocated in order to give accurate results. This
- happens automatically for the standard allocator functions such as
- malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
- delete[], etc.
- But if your program uses a custom allocator, this doesn't automatically
- happen, and Valgrind will not do as well. For example, if you allocate
- superblocks with mmap() and then allocates chunks of the superblocks, all
- Valgrind's observations will be at the mmap() level and it won't know that
- the chunks should be considered separate entities. In Memcheck's case,
- that means you probably won't get heap block overrun detection (because
- there won't be redzones marked as unaddressable) and you definitely won't
- get any leak detection.
- The following client requests allow a custom allocator to be annotated so
- that it can be handled accurately by Valgrind.
- VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
- by a malloc()-like function. For Memcheck (an illustrative case), this
- does two things:
- - It records that the block has been allocated. This means any addresses
- within the block mentioned in error messages will be
- identified as belonging to the block. It also means that if the block
- isn't freed it will be detected by the leak checker.
- - It marks the block as being addressable and undefined (if 'is_zeroed' is
- not set), or addressable and defined (if 'is_zeroed' is set). This
- controls how accesses to the block by the program are handled.
-
- 'addr' is the start of the usable block (ie. after any
- redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
- can apply redzones -- these are blocks of padding at the start and end of
- each block. Adding redzones is recommended as it makes it much more likely
- Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
- zeroed (or filled with another predictable value), as is the case for
- calloc().
-
- VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
- heap block -- that will be used by the client program -- is allocated.
- It's best to put it at the outermost level of the allocator if possible;
- for example, if you have a function my_alloc() which calls
- internal_alloc(), and the client request is put inside internal_alloc(),
- stack traces relating to the heap block will contain entries for both
- my_alloc() and internal_alloc(), which is probably not what you want.
- For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
- custom blocks from within a heap block, B, that has been allocated with
- malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
- -- the custom blocks will take precedence.
- VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
- Memcheck, it does two things:
- - It records that the block has been deallocated. This assumes that the
- block was annotated as having been allocated via
- VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
- - It marks the block as being unaddressable.
- VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
- heap block is deallocated.
- VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
- Memcheck, it does four things:
- - It records that the size of a block has been changed. This assumes that
- the block was annotated as having been allocated via
- VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
- - If the block shrunk, it marks the freed memory as being unaddressable.
- - If the block grew, it marks the new area as undefined and defines a red
- zone past the end of the new block.
- - The V-bits of the overlap between the old and the new block are preserved.
- VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
- and before deallocation of the old block.
- In many cases, these three client requests will not be enough to get your
- allocator working well with Memcheck. More specifically, if your allocator
- writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
- will be necessary to mark the memory as addressable just before the zeroing
- occurs, otherwise you'll get a lot of invalid write errors. For example,
- you'll need to do this if your allocator recycles freed blocks, but it
- zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
- Alternatively, if your allocator reuses freed blocks for allocator-internal
- data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
- Really, what's happening is a blurring of the lines between the client
- program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
- memory should be considered unaddressable to the client program, but the
- allocator knows more than the rest of the client program and so may be able
- to safely access it. Extra client requests are necessary for Valgrind to
- understand the distinction between the allocator and the rest of the
- program.
- Ignored if addr == 0.
- */
- #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \
- addr, sizeB, rzB, is_zeroed, 0)
- /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
- Ignored if addr == 0.
- */
- #define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \
- addr, oldSizeB, newSizeB, rzB, 0)
- /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
- Ignored if addr == 0.
- */
- #define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \
- addr, rzB, 0, 0, 0)
- /* Create a memory pool. */
- #define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, 0, 0)
- /* Create a memory pool with some flags specifying extended behaviour.
- When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL.
-
- The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory
- associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used
- by the application as superblocks to dole out MALLOC_LIKE blocks using
- VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a "2 levels"
- pool : first level is the blocks described by VALGRIND_MEMPOOL_ALLOC.
- The second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK.
- Note that the association between the pool and the second level blocks
- is implicit : second level blocks will be located inside first level
- blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag
- for such 2 levels pools, as otherwise valgrind will detect overlapping
- memory blocks, and will abort execution (e.g. during leak search).
- Such a meta pool can also be marked as an 'auto free' pool using the flag
- VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with the
- VALGRIND_MEMPOOL_METAPOOL. For an 'auto free' pool, VALGRIND_MEMPOOL_FREE
- will automatically free the second level blocks that are contained
- inside the first level block freed with VALGRIND_MEMPOOL_FREE.
- In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls
- to VALGRIND_FREELIKE_BLOCK for all the second level blocks included
- in the first level block.
- Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag
- without the VALGRIND_MEMPOOL_METAPOOL flag.
- */
- #define VALGRIND_MEMPOOL_AUTO_FREE 1
- #define VALGRIND_MEMPOOL_METAPOOL 2
- #define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, flags, 0)
- /* Destroy a memory pool. */
- #define VALGRIND_DESTROY_MEMPOOL(pool) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \
- pool, 0, 0, 0, 0)
- /* Associate a piece of memory with a memory pool. */
- #define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \
- pool, addr, size, 0, 0)
- /* Disassociate a piece of memory from a memory pool. */
- #define VALGRIND_MEMPOOL_FREE(pool, addr) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \
- pool, addr, 0, 0, 0)
- /* Disassociate any pieces outside a particular range. */
- #define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \
- pool, addr, size, 0, 0)
- /* Resize and/or move a piece associated with a memory pool. */
- #define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \
- poolA, poolB, 0, 0, 0)
- /* Resize and/or move a piece associated with a memory pool. */
- #define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \
- pool, addrA, addrB, size, 0)
- /* Return 1 if a mempool exists, else 0. */
- #define VALGRIND_MEMPOOL_EXISTS(pool) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MEMPOOL_EXISTS, \
- pool, 0, 0, 0, 0)
- /* Mark a piece of memory as being a stack. Returns a stack id.
- start is the lowest addressable stack byte, end is the highest
- addressable stack byte. */
- #define VALGRIND_STACK_REGISTER(start, end) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__STACK_REGISTER, \
- start, end, 0, 0, 0)
- /* Unmark the piece of memory associated with a stack id as being a
- stack. */
- #define VALGRIND_STACK_DEREGISTER(id) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \
- id, 0, 0, 0, 0)
- /* Change the start and end address of the stack id.
- start is the new lowest addressable stack byte, end is the new highest
- addressable stack byte. */
- #define VALGRIND_STACK_CHANGE(id, start, end) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \
- id, start, end, 0, 0)
- /* Load PDB debug info for Wine PE image_map. */
- #define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \
- fd, ptr, total_size, delta, 0)
- /* Map a code address to a source file name and line number. buf64
- must point to a 64-byte buffer in the caller's address space. The
- result will be dumped in there and is guaranteed to be zero
- terminated. If no info is found, the first byte is set to zero. */
- #define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
- (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
- VG_USERREQ__MAP_IP_TO_SRCLOC, \
- addr, buf64, 0, 0, 0)
- /* Disable error reporting for this thread. Behaves in a stack like
- way, so you can safely call this multiple times provided that
- VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times
- to re-enable reporting. The first call of this macro disables
- reporting. Subsequent calls have no effect except to increase the
- number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable
- reporting. Child threads do not inherit this setting from their
- parents -- they are always created with reporting enabled. */
- #define VALGRIND_DISABLE_ERROR_REPORTING \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
- 1, 0, 0, 0, 0)
- /* Re-enable error reporting, as per comments on
- VALGRIND_DISABLE_ERROR_REPORTING. */
- #define VALGRIND_ENABLE_ERROR_REPORTING \
- VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
- -1, 0, 0, 0, 0)
- /* Execute a monitor command from the client program.
- If a connection is opened with GDB, the output will be sent
- according to the output mode set for vgdb.
- If no connection is opened, output will go to the log output.
- Returns 1 if command not recognised, 0 otherwise. */
- #define VALGRIND_MONITOR_COMMAND(command) \
- VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \
- command, 0, 0, 0, 0)
- #undef PLAT_x86_darwin
- #undef PLAT_amd64_darwin
- #undef PLAT_x86_win32
- #undef PLAT_amd64_win64
- #undef PLAT_x86_linux
- #undef PLAT_amd64_linux
- #undef PLAT_ppc32_linux
- #undef PLAT_ppc64be_linux
- #undef PLAT_ppc64le_linux
- #undef PLAT_arm_linux
- #undef PLAT_s390x_linux
- #undef PLAT_mips32_linux
- #undef PLAT_mips64_linux
- #undef PLAT_x86_solaris
- #undef PLAT_amd64_solaris
- #endif /* __VALGRIND_H */
|