arm_neon.h 2.3 MB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212382223823238242382523826238272382823829238302383123832238332383423835238362383723838238392384023841238422384323844238452384623847238482384923850238512385223853238542385523856238572385823859238602386123862238632386423865238662386723868238692387023871238722387323874238752387623877238782387923880238812388223883238842388523886238872388823889238902389123892238932389423895238962389723898238992390023901239022390323904239052390623907239082390923910239112391223913239142391523916239172391823919239202392123922239232392423925239262392723928239292393023931239322393323934239352393623937239382393923940239412394223943239442394523946239472394823949239502395123952239532395423955239562395723958239592396023961239622396323964239652396623967239682396923970239712397223973239742397523976239772397823979239802398123982239832398423985239862398723988239892399023991239922399323994239952399623997239982399924000240012400224003240042400524006240072400824009240102401124012240132401424015240162401724018240192402024021240222402324024240252402624027240282402924030240312403224033240342403524036240372403824039240402404124042240432404424045240462404724048240492405024051240522405324054240552405624057240582405924060240612406224063240642406524066240672406824069240702407124072240732407424075240762407724078240792408024081240822408324084240852408624087240882408924090240912409224093240942409524096240972409824099241002410124102241032410424105241062410724108241092411024111241122411324114241152411624117241182411924120241212412224123241242412524126241272412824129241302413124132241332413424135241362413724138241392414024141241422414324144241452414624147241482414924150241512415224153241542415524156241572415824159241602416124162241632416424165241662416724168241692417024171241722417324174241752417624177241782417924180241812418224183241842418524186241872418824189241902419124192241932419424195241962419724198241992420024201242022420324204242052420624207242082420924210242112421224213242142421524216242172421824219242202422124222242232422424225242262422724228242292423024231242322423324234242352423624237242382423924240242412424224243242442424524246242472424824249242502425124252242532425424255242562425724258242592426024261242622
706137062370633706437065370663706737068370693707037071370723707337074370753707637077370783707937080370813708237083370843708537086370873708837089370903709137092370933709437095370963709737098370993710037101371023710337104371053710637107371083710937110371113711237113371143711537116371173711837119371203712137122371233712437125371263712737128371293713037131371323713337134371353713637137371383713937140371413714237143371443714537146371473714837149371503715137152371533715437155371563715737158371593716037161371623716337164371653716637167371683716937170371713717237173371743717537176371773717837179371803718137182371833718437185371863718737188371893719037191371923719337194371953719637197371983719937200372013720237203372043720537206372073720837209372103721137212372133721437215372163721737218372193722037221372223722337224372253722637227372283722937230372313723237233372343723537236372373723837239372403724137242372433724437245372463724737248372493725037251372523725337254372553725637257372583725937260372613726237263372643726537266372673726837269372703727137272372733727437275372763727737278372793728037281372823728337284372853728637287372883728937290372913729237293372943729537296372973729837299373003730137302373033730437305373063730737308373093731037311373123731337314373153731637317373183731937320373213732237323373243732537326373273732837329373303733137332373333733437335373363733737338373393734037341373423734337344373453734637347373483734937350373513735237353373543735537356373573735837359373603736137362373633736437365373663736737368373693737037371373723737337374373753737637377373783737937380373813738237383373843738537386373873738837389373903739137392373933739437395373963739737398373993740037401374023740337404374053740637407374083740937410374113741237413374143741537416374173741837419374203742137422374233742437425374263742737428374293743037431374323743337434374353743637437374383743937440374413744237443374443744537446374473744837449374503745137452374533745437455374563745737458374593746037461374623746337464374653746637467374683746937470374713747237473374743747537476374773747837479374803748137482374833748437485374863748737488374893749037491374923749337494374953749637497374983749937500375013750237503375043750537506375073750837509375103751137512375133751437515375163751737518375193752037521375223752337524375253752637527375283752937530375313753237533375343753537536375373753837539375403754137542375433754437545375463754737548375493755037551375523755337554375553755637557375583755937560375613756237563375643756537566375673756837569375703757137572375733757437575375763757737578375793758037581375823758337584375853758637587375883758937590375913759237593375943759537596375973759837599376003760137602376033760437605376063760737608376093761037611376123761337614376153761637617376183761937620376213762237623376243762537626376273762837629376303763137632376333763437635376363763737638376393764037641376423764337644376453764637647376483764937650376513765237653376543765537656376573765837659376603766137662376633766437665376663766737668376693767037671376723767337674376753767637677376783767937680376813768237683376843768537686376873768837689376903769137692376933769437695376963769737698376993770037701377023770337704377053770637707377083770937710377113771237713377143771537716377173771837719377203772137722377233772437725377263772737728377293773037731377323773337734377353773637737377383773937740377413774237743377443774537746377473774837749377503775137752377533775437755377563775737758377593776037761377623776337764377653776637767377683776937770377713
777237773377743777537776377773777837779377803778137782377833778437785377863778737788377893779037791377923779337794377953779637797377983779937800378013780237803378043780537806378073780837809378103781137812378133781437815378163781737818378193782037821378223782337824378253782637827378283782937830378313783237833378343783537836378373783837839378403784137842378433784437845378463784737848378493785037851378523785337854378553785637857378583785937860378613786237863378643786537866378673786837869378703787137872378733787437875378763787737878378793788037881378823788337884378853788637887378883788937890378913789237893378943789537896378973789837899379003790137902379033790437905379063790737908379093791037911379123791337914379153791637917379183791937920379213792237923379243792537926379273792837929379303793137932379333793437935379363793737938379393794037941379423794337944379453794637947379483794937950379513795237953379543795537956379573795837959379603796137962379633796437965379663796737968379693797037971379723797337974379753797637977379783797937980379813798237983379843798537986379873798837989379903799137992379933799437995379963799737998379993800038001380023800338004380053800638007380083800938010380113801238013380143801538016380173801838019380203802138022380233802438025380263802738028380293803038031380323803338034380353803638037380383803938040380413804238043380443804538046380473804838049380503805138052380533805438055380563805738058380593806038061380623806338064380653806638067380683806938070380713807238073380743807538076380773807838079380803808138082380833808438085380863808738088380893809038091380923809338094380953809638097380983809938100381013810238103381043810538106381073810838109381103811138112381133811438115381163811738118381193812038121381223812338124381253812638127381283812938130381313813238133381343813538136381373813838139381403814138142381433814438145381463814738148381493815038151381523815338154381553815638157381583815938160381613816238163381643816538166381673816838169381703817138172381733817438175381763817738178381793818038181381823818338184381853818638187381883818938190381913819238193381943819538196381973819838199382003820138202382033820438205382063820738208382093821038211382123821338214382153821638217382183821938220382213822238223382243822538226382273822838229382303823138232382333823438235382363823738238382393824038241382423824338244382453824638247382483824938250382513825238253382543825538256382573825838259382603826138262382633826438265382663826738268382693827038271382723827338274382753827638277382783827938280382813828238283382843828538286382873828838289382903829138292382933829438295382963829738298382993830038301383023830338304383053830638307383083830938310383113831238313383143831538316383173831838319383203832138322383233832438325383263832738328383293833038331383323833338334383353833638337383383833938340383413834238343383443834538346383473834838349383503835138352383533835438355383563835738358383593836038361383623836338364383653836638367383683836938370383713837238373383743837538376383773837838379383803838138382383833838438385383863838738388383893839038391383923839338394383953839638397383983839938400384013840238403384043840538406384073840838409384103841138412384133841438415384163841738418384193842038421384223842338424384253842638427384283842938430384313843238433384343843538436384373843838439384403844138442384433844438445384463844738448384493845038451384523845338454384553845638457384583845938460384613846238463384643846538466384673846838469384703847138472384733847438475384763847738478384793848038481384823
848338484384853848638487384883848938490384913849238493384943849538496384973849838499385003850138502385033850438505385063850738508385093851038511385123851338514385153851638517385183851938520385213852238523385243852538526385273852838529385303853138532385333853438535385363853738538385393854038541385423854338544385453854638547385483854938550385513855238553385543855538556385573855838559385603856138562385633856438565385663856738568385693857038571385723857338574385753857638577385783857938580385813858238583385843858538586385873858838589385903859138592385933859438595385963859738598385993860038601386023860338604386053860638607386083860938610386113861238613386143861538616386173861838619386203862138622386233862438625386263862738628386293863038631386323863338634386353863638637386383863938640386413864238643386443864538646386473864838649386503865138652386533865438655386563865738658386593866038661386623866338664386653866638667386683866938670386713867238673386743867538676386773867838679386803868138682386833868438685386863868738688386893869038691386923869338694386953869638697386983869938700387013870238703387043870538706387073870838709387103871138712387133871438715387163871738718387193872038721387223872338724387253872638727387283872938730387313873238733387343873538736387373873838739387403874138742387433874438745387463874738748387493875038751387523875338754387553875638757387583875938760387613876238763387643876538766387673876838769387703877138772387733877438775387763877738778387793878038781387823878338784387853878638787387883878938790387913879238793387943879538796387973879838799388003880138802388033880438805388063880738808388093881038811388123881338814388153881638817388183881938820388213882238823388243882538826388273882838829388303883138832388333883438835388363883738838388393884038841388423884338844388453884638847388483884938850388513885238853388543885538856388573885838859388603886138862388633886438865388663886738868388693887038871388723887338874388753887638877388783887938880388813888238883388843888538886388873888838889388903889138892388933889438895388963889738898388993890038901389023890338904389053890638907389083890938910389113891238913389143891538916389173891838919389203892138922389233892438925389263892738928389293893038931389323893338934389353893638937389383893938940389413894238943389443894538946389473894838949389503895138952389533895438955389563895738958389593896038961389623896338964389653896638967389683896938970389713897238973389743897538976389773897838979389803898138982389833898438985389863898738988389893899038991389923899338994389953899638997389983899939000390013900239003390043900539006390073900839009390103901139012390133901439015390163901739018390193902039021390223902339024390253902639027390283902939030390313903239033390343903539036390373903839039390403904139042390433904439045390463904739048390493905039051390523905339054390553905639057390583905939060390613906239063390643906539066390673906839069390703907139072390733907439075390763907739078390793908039081390823908339084390853908639087390883908939090390913909239093390943909539096390973909839099391003910139102391033910439105391063910739108391093911039111391123911339114391153911639117391183911939120391213912239123391243912539126391273912839129391303913139132391333913439135391363913739138391393914039141391423914339144391453914639147391483914939150391513915239153391543915539156391573915839159391603916139162391633916439165391663916739168391693917039171391723917339174391753917639177391783917939180391813918239183391843918539186391873918839189391903919139192391933
919439195391963919739198391993920039201392023920339204392053920639207392083920939210392113921239213392143921539216392173921839219392203922139222392233922439225392263922739228392293923039231392323923339234392353923639237392383923939240392413924239243392443924539246392473924839249392503925139252392533925439255392563925739258392593926039261392623926339264392653926639267392683926939270392713927239273392743927539276392773927839279392803928139282392833928439285392863928739288392893929039291392923929339294392953929639297392983929939300393013930239303393043930539306393073930839309393103931139312393133931439315393163931739318393193932039321393223932339324393253932639327393283932939330393313933239333393343933539336393373933839339393403934139342393433934439345393463934739348393493935039351393523935339354393553935639357393583935939360393613936239363393643936539366393673936839369393703937139372393733937439375393763937739378393793938039381393823938339384393853938639387393883938939390393913939239393393943939539396393973939839399394003940139402394033940439405394063940739408394093941039411394123941339414394153941639417394183941939420394213942239423394243942539426394273942839429394303943139432394333943439435394363943739438394393944039441394423944339444394453944639447394483944939450394513945239453394543945539456394573945839459394603946139462394633946439465394663946739468394693947039471394723947339474394753947639477394783947939480394813948239483394843948539486394873948839489394903949139492394933949439495394963949739498394993950039501395023950339504395053950639507395083950939510395113951239513395143951539516395173951839519395203952139522395233952439525395263952739528395293953039531395323953339534395353953639537395383953939540395413954239543395443954539546395473954839549395503955139552395533955439555395563955739558395593956039561395623956339564395653956639567395683956939570395713957239573395743957539576395773957839579395803958139582395833958439585395863958739588395893959039591395923959339594395953959639597395983959939600396013960239603396043960539606396073960839609396103961139612396133961439615396163961739618396193962039621396223962339624396253962639627396283962939630396313963239633396343963539636396373963839639396403964139642396433964439645396463964739648396493965039651396523965339654396553965639657396583965939660396613966239663396643966539666396673966839669396703967139672396733967439675396763967739678396793968039681396823968339684396853968639687396883968939690396913969239693396943969539696396973969839699397003970139702397033970439705397063970739708397093971039711397123971339714397153971639717397183971939720397213972239723397243972539726397273972839729397303973139732397333973439735397363973739738397393974039741397423974339744397453974639747397483974939750397513975239753397543975539756397573975839759397603976139762397633976439765397663976739768397693977039771397723977339774397753977639777397783977939780397813978239783397843978539786397873978839789397903979139792397933979439795397963979739798397993980039801398023980339804398053980639807398083980939810398113981239813398143981539816398173981839819398203982139822398233982439825398263982739828398293983039831398323983339834398353983639837398383983939840398413984239843398443984539846398473984839849398503985139852398533985439855398563985739858398593986039861398623986339864398653986639867398683986939870398713987239873398743987539876398773987839879398803988139882398833988439885398863988739888398893989039891398923989339894398953989639897398983989939900399013990239903399043
990539906399073990839909399103991139912399133991439915399163991739918399193992039921399223992339924399253992639927399283992939930399313993239933399343993539936399373993839939399403994139942399433994439945399463994739948399493995039951399523995339954399553995639957399583995939960399613996239963399643996539966399673996839969399703997139972399733997439975399763997739978399793998039981399823998339984399853998639987399883998939990399913999239993399943999539996399973999839999400004000140002400034000440005400064000740008400094001040011400124001340014400154001640017400184001940020400214002240023400244002540026400274002840029400304003140032400334003440035400364003740038400394004040041400424004340044400454004640047400484004940050400514005240053400544005540056400574005840059400604006140062400634006440065400664006740068400694007040071400724007340074400754007640077400784007940080400814008240083400844008540086400874008840089400904009140092400934009440095400964009740098400994010040101401024010340104401054010640107401084010940110401114011240113401144011540116401174011840119401204012140122401234012440125401264012740128401294013040131401324013340134401354013640137401384013940140401414014240143401444014540146401474014840149401504015140152401534015440155401564015740158401594016040161401624016340164401654016640167401684016940170401714017240173401744017540176401774017840179401804018140182401834018440185401864018740188401894019040191401924019340194401954019640197401984019940200402014020240203402044020540206402074020840209402104021140212402134021440215402164021740218402194022040221402224022340224402254022640227402284022940230402314023240233402344023540236402374023840239402404024140242402434024440245402464024740248402494025040251402524025340254402554025640257402584025940260402614026240263402644026540266402674026840269402704027140272402734027440275402764027740278402794028040281402824028340284402854028640287402884028940290402914029240293402944029540296402974029840299403004030140302403034030440305403064030740308403094031040311403124031340314403154031640317403184031940320403214032240323403244032540326403274032840329403304033140332403334033440335403364033740338403394034040341403424034340344403454034640347403484034940350403514035240353403544035540356403574035840359403604036140362403634036440365403664036740368403694037040371403724037340374403754037640377403784037940380403814038240383403844038540386403874038840389403904039140392403934039440395403964039740398403994040040401404024040340404404054040640407404084040940410404114041240413404144041540416404174041840419404204042140422404234042440425404264042740428404294043040431404324043340434404354043640437404384043940440404414044240443404444044540446404474044840449404504045140452404534045440455404564045740458404594046040461404624046340464404654046640467404684046940470404714047240473404744047540476404774047840479404804048140482404834048440485404864048740488404894049040491404924049340494404954049640497404984049940500405014050240503405044050540506405074050840509405104051140512405134051440515405164051740518405194052040521405224052340524405254052640527405284052940530405314053240533405344053540536405374053840539405404054140542405434054440545405464054740548405494055040551405524055340554405554055640557405584055940560405614056240563405644056540566405674056840569405704057140572405734057440575405764057740578405794058040581405824058340584405854058640587405884058940590405914059240593405944059540596405974059840599406004060140602406034060440605406064060740608406094061040611406124061340614406154
061640617406184061940620406214062240623406244062540626406274062840629406304063140632406334063440635406364063740638406394064040641406424064340644406454064640647406484064940650406514065240653406544065540656406574065840659406604066140662406634066440665406664066740668406694067040671406724067340674406754067640677406784067940680406814068240683406844068540686406874068840689406904069140692406934069440695406964069740698406994070040701407024070340704407054070640707407084070940710407114071240713407144071540716407174071840719407204072140722407234072440725407264072740728407294073040731407324073340734407354073640737407384073940740407414074240743407444074540746407474074840749407504075140752407534075440755407564075740758407594076040761407624076340764407654076640767407684076940770407714077240773407744077540776407774077840779407804078140782407834078440785407864078740788407894079040791407924079340794407954079640797407984079940800408014080240803408044080540806408074080840809408104081140812408134081440815408164081740818408194082040821408224082340824408254082640827408284082940830408314083240833408344083540836408374083840839408404084140842408434084440845408464084740848408494085040851408524085340854408554085640857408584085940860408614086240863408644086540866408674086840869408704087140872408734087440875408764087740878408794088040881408824088340884408854088640887408884088940890408914089240893408944089540896408974089840899409004090140902409034090440905409064090740908409094091040911409124091340914409154091640917409184091940920409214092240923409244092540926409274092840929409304093140932409334093440935409364093740938409394094040941409424094340944409454094640947409484094940950409514095240953409544095540956409574095840959409604096140962409634096440965409664096740968409694097040971409724097340974409754097640977409784097940980409814098240983409844098540986409874098840989409904099140992409934099440995409964099740998409994100041001410024100341004410054100641007410084100941010410114101241013410144101541016410174101841019410204102141022410234102441025410264102741028410294103041031410324103341034410354103641037410384103941040410414104241043410444104541046410474104841049410504105141052410534105441055410564105741058410594106041061410624106341064410654106641067410684106941070410714107241073410744107541076410774107841079410804108141082410834108441085410864108741088410894109041091410924109341094410954109641097410984109941100411014110241103411044110541106411074110841109411104111141112411134111441115411164111741118411194112041121411224112341124411254112641127411284112941130411314113241133411344113541136411374113841139411404114141142411434114441145411464114741148411494115041151411524115341154411554115641157411584115941160411614116241163411644116541166411674116841169411704117141172411734117441175411764117741178411794118041181411824118341184411854118641187411884118941190411914119241193411944119541196411974119841199412004120141202412034120441205412064120741208412094121041211412124121341214412154121641217412184121941220412214122241223412244122541226412274122841229412304123141232412334123441235412364123741238412394124041241412424124341244412454124641247412484124941250412514125241253412544125541256412574125841259412604126141262412634126441265412664126741268412694127041271412724127341274412754127641277412784127941280412814128241283412844128541286412874128841289412904129141292412934129441295412964129741298412994130041301413024130341304413054130641307413084130941310413114131241313413144131541316413174131841319413204132141322413234132441325413264
132741328413294133041331413324133341334413354133641337413384133941340413414134241343413444134541346413474134841349413504135141352413534135441355413564135741358413594136041361413624136341364413654136641367413684136941370413714137241373413744137541376413774137841379413804138141382413834138441385413864138741388413894139041391413924139341394413954139641397413984139941400414014140241403414044140541406414074140841409414104141141412414134141441415414164141741418414194142041421414224142341424414254142641427414284142941430414314143241433414344143541436414374143841439414404144141442414434144441445414464144741448414494145041451414524145341454414554145641457414584145941460414614146241463414644146541466414674146841469414704147141472414734147441475414764147741478414794148041481414824148341484414854148641487414884148941490414914149241493414944149541496414974149841499415004150141502415034150441505415064150741508415094151041511415124151341514415154151641517415184151941520415214152241523415244152541526415274152841529415304153141532415334153441535415364153741538415394154041541415424154341544415454154641547415484154941550415514155241553415544155541556415574155841559415604156141562415634156441565415664156741568415694157041571415724157341574415754157641577415784157941580415814158241583415844158541586415874158841589415904159141592415934159441595415964159741598415994160041601416024160341604416054160641607416084160941610416114161241613416144161541616416174161841619416204162141622416234162441625416264162741628416294163041631416324163341634416354163641637416384163941640416414164241643416444164541646416474164841649416504165141652416534165441655416564165741658416594166041661416624166341664416654166641667416684166941670416714167241673416744167541676416774167841679416804168141682416834168441685416864168741688416894169041691416924169341694416954169641697416984169941700417014170241703417044170541706417074170841709417104171141712417134171441715417164171741718417194172041721417224172341724417254172641727417284172941730417314173241733417344173541736417374173841739417404174141742417434174441745417464174741748417494175041751417524175341754417554175641757417584175941760417614176241763417644176541766417674176841769417704177141772417734177441775417764177741778417794178041781417824178341784417854178641787417884178941790417914179241793417944179541796417974179841799418004180141802418034180441805418064180741808418094181041811418124181341814418154181641817418184181941820418214182241823418244182541826418274182841829418304183141832418334183441835418364183741838418394184041841418424184341844418454184641847418484184941850418514185241853418544185541856418574185841859418604186141862418634186441865418664186741868418694187041871418724187341874418754187641877418784187941880418814188241883418844188541886418874188841889418904189141892418934189441895418964189741898418994190041901419024190341904419054190641907419084190941910419114191241913419144191541916419174191841919419204192141922419234192441925419264192741928419294193041931419324193341934419354193641937419384193941940419414194241943419444194541946419474194841949419504195141952419534195441955419564195741958419594196041961419624196341964419654196641967419684196941970419714197241973419744197541976419774197841979419804198141982419834198441985419864198741988419894199041991419924199341994419954199641997419984199942000420014200242003420044200542006420074200842009420104201142012420134201442015420164201742018420194202042021420224202342024420254202642027420284202942030420314203242033420344203542036420374
203842039420404204142042420434204442045420464204742048420494205042051420524205342054420554205642057420584205942060420614206242063420644206542066420674206842069420704207142072420734207442075420764207742078420794208042081420824208342084420854208642087420884208942090420914209242093420944209542096420974209842099421004210142102421034210442105421064210742108421094211042111421124211342114421154211642117421184211942120421214212242123421244212542126421274212842129421304213142132421334213442135421364213742138421394214042141421424214342144421454214642147421484214942150421514215242153421544215542156421574215842159421604216142162421634216442165421664216742168421694217042171421724217342174421754217642177421784217942180421814218242183421844218542186421874218842189421904219142192421934219442195421964219742198421994220042201422024220342204422054220642207422084220942210422114221242213422144221542216422174221842219422204222142222422234222442225422264222742228422294223042231422324223342234422354223642237422384223942240422414224242243422444224542246422474224842249422504225142252422534225442255422564225742258422594226042261422624226342264422654226642267422684226942270422714227242273422744227542276422774227842279422804228142282422834228442285422864228742288422894229042291422924229342294422954229642297422984229942300423014230242303423044230542306423074230842309423104231142312423134231442315423164231742318423194232042321423224232342324423254232642327423284232942330423314233242333423344233542336423374233842339423404234142342423434234442345423464234742348423494235042351423524235342354423554235642357423584235942360423614236242363423644236542366423674236842369423704237142372423734237442375423764237742378423794238042381423824238342384423854238642387423884238942390423914239242393423944239542396423974239842399424004240142402424034240442405424064240742408424094241042411424124241342414424154241642417424184241942420424214242242423424244242542426424274242842429424304243142432424334243442435424364243742438424394244042441424424244342444424454244642447424484244942450424514245242453424544245542456424574245842459424604246142462424634246442465424664246742468424694247042471424724247342474424754247642477424784247942480424814248242483424844248542486424874248842489424904249142492424934249442495424964249742498424994250042501425024250342504425054250642507425084250942510425114251242513425144251542516425174251842519425204252142522425234252442525425264252742528425294253042531425324253342534425354253642537425384253942540425414254242543425444254542546425474254842549425504255142552425534255442555425564255742558425594256042561425624256342564425654256642567425684256942570425714257242573425744257542576425774257842579425804258142582425834258442585425864258742588425894259042591425924259342594425954259642597425984259942600426014260242603426044260542606426074260842609426104261142612426134261442615426164261742618426194262042621426224262342624426254262642627426284262942630426314263242633426344263542636426374263842639426404264142642426434264442645426464264742648426494265042651426524265342654426554265642657426584265942660426614266242663426644266542666426674266842669426704267142672426734267442675426764267742678426794268042681426824268342684426854268642687426884268942690426914269242693426944269542696426974269842699427004270142702427034270442705427064270742708427094271042711427124271342714427154271642717427184271942720427214272242723427244272542726427274272842729427304273142732427334273442735427364273742738427394274042741427424274342744427454274642747427484
274942750427514275242753427544275542756427574275842759427604276142762427634276442765427664276742768427694277042771427724277342774427754277642777427784277942780427814278242783427844278542786427874278842789427904279142792427934279442795427964279742798427994280042801428024280342804428054280642807428084280942810428114281242813428144281542816428174281842819428204282142822428234282442825428264282742828428294283042831428324283342834428354283642837428384283942840428414284242843428444284542846428474284842849428504285142852428534285442855428564285742858428594286042861428624286342864428654286642867428684286942870428714287242873428744287542876428774287842879428804288142882428834288442885428864288742888428894289042891428924289342894428954289642897428984289942900429014290242903429044290542906429074290842909429104291142912429134291442915429164291742918429194292042921429224292342924429254292642927429284292942930429314293242933429344293542936429374293842939429404294142942429434294442945429464294742948429494295042951429524295342954429554295642957429584295942960429614296242963429644296542966429674296842969429704297142972429734297442975429764297742978429794298042981429824298342984429854298642987429884298942990429914299242993429944299542996429974299842999430004300143002430034300443005430064300743008430094301043011430124301343014430154301643017430184301943020430214302243023430244302543026430274302843029430304303143032430334303443035430364303743038430394304043041430424304343044430454304643047430484304943050430514305243053430544305543056430574305843059430604306143062430634306443065430664306743068430694307043071430724307343074430754307643077430784307943080430814308243083430844308543086430874308843089430904309143092430934309443095430964309743098430994310043101431024310343104431054310643107431084310943110431114311243113431144311543116431174311843119431204312143122431234312443125431264312743128431294313043131431324313343134431354313643137431384313943140431414314243143431444314543146431474314843149431504315143152431534315443155431564315743158431594316043161431624316343164431654316643167431684316943170431714317243173431744317543176431774317843179431804318143182431834318443185431864318743188431894319043191431924319343194431954319643197431984319943200432014320243203432044320543206432074320843209432104321143212432134321443215432164321743218432194322043221432224322343224432254322643227432284322943230432314323243233432344323543236432374323843239432404324143242432434324443245432464324743248432494325043251432524325343254432554325643257432584325943260432614326243263432644326543266432674326843269432704327143272432734327443275432764327743278432794328043281432824328343284432854328643287432884328943290432914329243293432944329543296432974329843299433004330143302433034330443305433064330743308433094331043311433124331343314433154331643317433184331943320433214332243323433244332543326433274332843329433304333143332433334333443335433364333743338433394334043341433424334343344433454334643347433484334943350433514335243353433544335543356433574335843359433604336143362433634336443365433664336743368433694337043371433724337343374433754337643377433784337943380433814338243383433844338543386433874338843389433904339143392433934339443395433964339743398433994340043401434024340343404434054340643407434084340943410434114341243413434144341543416434174341843419434204342143422434234342443425434264342743428434294343043431434324343343434434354343643437434384343943440434414344243443434444344543446434474344843449434504345143452434534345443455434564345743458434594
346043461434624346343464434654346643467434684346943470434714347243473434744347543476434774347843479434804348143482434834348443485434864348743488434894349043491434924349343494434954349643497434984349943500435014350243503435044350543506435074350843509435104351143512435134351443515435164351743518435194352043521435224352343524435254352643527435284352943530435314353243533435344353543536435374353843539435404354143542435434354443545435464354743548435494355043551435524355343554435554355643557435584355943560435614356243563435644356543566435674356843569435704357143572435734357443575435764357743578435794358043581435824358343584435854358643587435884358943590435914359243593435944359543596435974359843599436004360143602436034360443605436064360743608436094361043611436124361343614436154361643617436184361943620436214362243623436244362543626436274362843629436304363143632436334363443635436364363743638436394364043641436424364343644436454364643647436484364943650436514365243653436544365543656436574365843659436604366143662436634366443665436664366743668436694367043671436724367343674436754367643677436784367943680436814368243683436844368543686436874368843689436904369143692436934369443695436964369743698436994370043701437024370343704437054370643707437084370943710437114371243713437144371543716437174371843719437204372143722437234372443725437264372743728437294373043731437324373343734437354373643737437384373943740437414374243743437444374543746437474374843749437504375143752437534375443755437564375743758437594376043761437624376343764437654376643767437684376943770437714377243773437744377543776437774377843779437804378143782437834378443785437864378743788437894379043791437924379343794437954379643797437984379943800438014380243803438044380543806438074380843809438104381143812438134381443815438164381743818438194382043821438224382343824438254382643827438284382943830438314383243833438344383543836438374383843839438404384143842438434384443845438464384743848438494385043851438524385343854438554385643857438584385943860438614386243863438644386543866438674386843869438704387143872438734387443875438764387743878438794388043881438824388343884438854388643887438884388943890438914389243893438944389543896438974389843899439004390143902439034390443905439064390743908439094391043911439124391343914439154391643917439184391943920439214392243923439244392543926439274392843929439304393143932439334393443935439364393743938439394394043941439424394343944439454394643947439484394943950439514395243953439544395543956439574395843959439604396143962439634396443965439664396743968439694397043971439724397343974439754397643977439784397943980439814398243983439844398543986439874398843989439904399143992439934399443995439964399743998439994400044001440024400344004440054400644007440084400944010440114401244013440144401544016440174401844019440204402144022440234402444025440264402744028440294403044031440324403344034440354403644037440384403944040440414404244043440444404544046440474404844049440504405144052440534405444055440564405744058440594406044061440624406344064440654406644067440684406944070440714407244073440744407544076440774407844079440804408144082440834408444085440864408744088440894409044091440924409344094440954409644097440984409944100441014410244103441044410544106441074410844109441104411144112441134411444115441164411744118441194412044121441224412344124441254412644127441284412944130441314413244133441344413544136441374413844139441404414144142441434414444145441464414744148441494415044151441524415344154441554415644157441584415944160441614416244163441644416544166441674416844169441704
417144172441734417444175441764417744178441794418044181441824418344184441854418644187441884418944190441914419244193441944419544196441974419844199442004420144202442034420444205442064420744208442094421044211442124421344214442154421644217442184421944220442214422244223442244422544226442274422844229442304423144232442334423444235442364423744238442394424044241442424424344244442454424644247442484424944250442514425244253442544425544256442574425844259442604426144262442634426444265442664426744268442694427044271442724427344274442754427644277442784427944280442814428244283442844428544286442874428844289442904429144292442934429444295442964429744298442994430044301443024430344304443054430644307443084430944310443114431244313443144431544316443174431844319443204432144322443234432444325443264432744328443294433044331443324433344334443354433644337443384433944340443414434244343443444434544346443474434844349443504435144352443534435444355443564435744358443594436044361443624436344364443654436644367443684436944370443714437244373443744437544376443774437844379443804438144382443834438444385443864438744388443894439044391443924439344394443954439644397443984439944400444014440244403444044440544406444074440844409444104441144412444134441444415444164441744418444194442044421444224442344424444254442644427444284442944430444314443244433444344443544436444374443844439444404444144442444434444444445444464444744448444494445044451444524445344454444554445644457444584445944460444614446244463444644446544466444674446844469444704447144472444734447444475444764447744478444794448044481444824448344484444854448644487444884448944490444914449244493444944449544496444974449844499445004450144502445034450444505445064450744508445094451044511445124451344514445154451644517445184451944520445214452244523445244452544526445274452844529445304453144532445334453444535445364453744538445394454044541445424454344544445454454644547445484454944550445514455244553445544455544556445574455844559445604456144562445634456444565445664456744568445694457044571445724457344574445754457644577445784457944580445814458244583445844458544586445874458844589445904459144592445934459444595445964459744598445994460044601446024460344604446054460644607446084460944610446114461244613446144461544616446174461844619446204462144622446234462444625446264462744628446294463044631446324463344634446354463644637446384463944640446414464244643446444464544646446474464844649446504465144652446534465444655446564465744658446594466044661446624466344664446654466644667446684466944670446714467244673446744467544676446774467844679446804468144682446834468444685446864468744688446894469044691446924469344694446954469644697446984469944700447014470244703447044470544706447074470844709447104471144712447134471444715447164471744718447194472044721447224472344724447254472644727447284472944730447314473244733447344473544736447374473844739447404474144742447434474444745447464474744748447494475044751447524475344754447554475644757447584475944760447614476244763447644476544766447674476844769447704477144772447734477444775447764477744778447794478044781447824478344784447854478644787447884478944790447914479244793447944479544796447974479844799448004480144802448034480444805448064480744808448094481044811448124481344814448154481644817448184481944820448214482244823448244482544826448274482844829448304483144832448334483444835448364483744838448394484044841448424484344844448454484644847448484484944850448514485244853448544485544856448574485844859448604486144862448634486444865448664486744868448694487044871448724487344874448754487644877448784487944880448814
488244883448844488544886448874488844889448904489144892448934489444895448964489744898448994490044901449024490344904449054490644907449084490944910449114491244913449144491544916449174491844919449204492144922449234492444925449264492744928449294493044931449324493344934449354493644937449384493944940449414494244943449444494544946449474494844949449504495144952449534495444955449564495744958449594496044961449624496344964449654496644967449684496944970449714497244973449744497544976449774497844979449804498144982449834498444985449864498744988449894499044991449924499344994449954499644997449984499945000450014500245003450044500545006450074500845009450104501145012450134501445015450164501745018450194502045021450224502345024450254502645027450284502945030450314503245033450344503545036450374503845039450404504145042450434504445045450464504745048450494505045051450524505345054450554505645057450584505945060450614506245063450644506545066450674506845069450704507145072450734507445075450764507745078450794508045081450824508345084450854508645087450884508945090450914509245093450944509545096450974509845099451004510145102451034510445105451064510745108451094511045111451124511345114451154511645117451184511945120451214512245123451244512545126451274512845129451304513145132451334513445135451364513745138451394514045141451424514345144451454514645147451484514945150451514515245153451544515545156451574515845159451604516145162451634516445165451664516745168451694517045171451724517345174451754517645177451784517945180451814518245183451844518545186451874518845189451904519145192451934519445195451964519745198451994520045201452024520345204452054520645207452084520945210452114521245213452144521545216452174521845219452204522145222452234522445225452264522745228452294523045231452324523345234452354523645237452384523945240452414524245243452444524545246452474524845249452504525145252452534525445255452564525745258452594526045261452624526345264452654526645267452684526945270452714527245273452744527545276452774527845279452804528145282452834528445285452864528745288452894529045291452924529345294452954529645297452984529945300453014530245303453044530545306453074530845309453104531145312453134531445315453164531745318453194532045321453224532345324453254532645327453284532945330453314533245333453344533545336453374533845339453404534145342453434534445345453464534745348453494535045351453524535345354453554535645357453584535945360453614536245363453644536545366453674536845369453704537145372453734537445375453764537745378453794538045381453824538345384453854538645387453884538945390453914539245393453944539545396453974539845399454004540145402454034540445405454064540745408454094541045411454124541345414454154541645417454184541945420454214542245423454244542545426454274542845429454304543145432454334543445435454364543745438454394544045441454424544345444454454544645447454484544945450454514545245453454544545545456454574545845459454604546145462454634546445465454664546745468454694547045471454724547345474454754547645477454784547945480454814548245483454844548545486454874548845489454904549145492454934549445495454964549745498454994550045501455024550345504455054550645507455084550945510455114551245513455144551545516455174551845519455204552145522455234552445525455264552745528455294553045531455324553345534455354553645537455384553945540455414554245543455444554545546455474554845549455504555145552455534555445555455564555745558455594556045561455624556345564455654556645567455684556945570455714557245573455744557545576455774557845579455804558145582455834558445585455864558745588455894559045591455924
559345594455954559645597455984559945600456014560245603456044560545606456074560845609456104561145612456134561445615456164561745618456194562045621456224562345624456254562645627456284562945630456314563245633456344563545636456374563845639456404564145642456434564445645456464564745648456494565045651456524565345654456554565645657456584565945660456614566245663456644566545666456674566845669456704567145672456734567445675456764567745678456794568045681456824568345684456854568645687456884568945690456914569245693456944569545696456974569845699457004570145702457034570445705457064570745708457094571045711457124571345714457154571645717457184571945720457214572245723457244572545726457274572845729457304573145732457334573445735457364573745738457394574045741457424574345744457454574645747457484574945750457514575245753457544575545756457574575845759457604576145762457634576445765457664576745768457694577045771457724577345774457754577645777457784577945780457814578245783457844578545786457874578845789457904579145792457934579445795457964579745798457994580045801458024580345804458054580645807458084580945810458114581245813458144581545816458174581845819458204582145822458234582445825458264582745828458294583045831458324583345834458354583645837458384583945840458414584245843458444584545846458474584845849458504585145852458534585445855458564585745858458594586045861458624586345864458654586645867458684586945870458714587245873458744587545876458774587845879458804588145882458834588445885458864588745888458894589045891458924589345894458954589645897458984589945900459014590245903459044590545906459074590845909459104591145912459134591445915459164591745918459194592045921459224592345924459254592645927459284592945930459314593245933459344593545936459374593845939459404594145942459434594445945459464594745948459494595045951459524595345954459554595645957459584595945960459614596245963459644596545966459674596845969459704597145972459734597445975459764597745978459794598045981459824598345984459854598645987459884598945990459914599245993459944599545996459974599845999460004600146002460034600446005460064600746008460094601046011460124601346014460154601646017460184601946020460214602246023460244602546026460274602846029460304603146032460334603446035460364603746038460394604046041460424604346044460454604646047460484604946050460514605246053460544605546056460574605846059460604606146062460634606446065460664606746068460694607046071460724607346074460754607646077460784607946080460814608246083460844608546086460874608846089460904609146092460934609446095460964609746098460994610046101461024610346104461054610646107461084610946110461114611246113461144611546116461174611846119461204612146122461234612446125461264612746128461294613046131461324613346134461354613646137461384613946140461414614246143461444614546146461474614846149461504615146152461534615446155461564615746158461594616046161461624616346164461654616646167461684616946170461714617246173461744617546176461774617846179461804618146182461834618446185461864618746188461894619046191461924619346194461954619646197461984619946200462014620246203462044620546206462074620846209462104621146212462134621446215462164621746218462194622046221462224622346224462254622646227462284622946230462314623246233462344623546236462374623846239462404624146242462434624446245462464624746248462494625046251462524625346254462554625646257462584625946260462614626246263462644626546266462674626846269462704627146272462734627446275462764627746278462794628046281462824628346284462854628646287462884628946290462914629246293462944629546296462974629846299463004630146302463034
630446305463064630746308463094631046311463124631346314463154631646317463184631946320463214632246323463244632546326463274632846329463304633146332463334633446335463364633746338463394634046341463424634346344463454634646347463484634946350463514635246353463544635546356463574635846359463604636146362463634636446365463664636746368463694637046371463724637346374463754637646377463784637946380463814638246383463844638546386463874638846389463904639146392463934639446395463964639746398463994640046401464024640346404464054640646407464084640946410464114641246413464144641546416464174641846419464204642146422464234642446425464264642746428464294643046431464324643346434464354643646437464384643946440464414644246443464444644546446464474644846449464504645146452464534645446455464564645746458464594646046461464624646346464464654646646467464684646946470464714647246473464744647546476464774647846479464804648146482464834648446485464864648746488464894649046491464924649346494464954649646497464984649946500465014650246503465044650546506465074650846509465104651146512465134651446515465164651746518465194652046521465224652346524465254652646527465284652946530465314653246533465344653546536465374653846539465404654146542465434654446545465464654746548465494655046551465524655346554465554655646557465584655946560465614656246563465644656546566465674656846569465704657146572465734657446575465764657746578465794658046581465824658346584465854658646587465884658946590465914659246593465944659546596465974659846599466004660146602466034660446605466064660746608466094661046611466124661346614466154661646617466184661946620466214662246623466244662546626466274662846629466304663146632466334663446635466364663746638466394664046641466424664346644466454664646647466484664946650466514665246653466544665546656466574665846659466604666146662466634666446665466664666746668466694667046671466724667346674466754667646677466784667946680466814668246683466844668546686466874668846689466904669146692466934669446695466964669746698466994670046701467024670346704467054670646707467084670946710467114671246713467144671546716467174671846719467204672146722467234672446725467264672746728467294673046731467324673346734467354673646737467384673946740467414674246743467444674546746467474674846749467504675146752467534675446755467564675746758467594676046761467624676346764467654676646767467684676946770467714677246773467744677546776467774677846779467804678146782467834678446785467864678746788467894679046791467924679346794467954679646797467984679946800468014680246803468044680546806468074680846809468104681146812468134681446815468164681746818468194682046821468224682346824468254682646827468284682946830468314683246833468344683546836468374683846839468404684146842468434684446845468464684746848468494685046851468524685346854468554685646857468584685946860468614686246863468644686546866468674686846869468704687146872468734687446875468764687746878468794688046881468824688346884468854688646887468884688946890468914689246893468944689546896468974689846899469004690146902469034690446905469064690746908469094691046911469124691346914469154691646917469184691946920469214692246923469244692546926469274692846929469304693146932469334693446935469364693746938469394694046941469424694346944469454694646947469484694946950469514695246953469544695546956469574695846959469604696146962469634696446965469664696746968469694697046971469724697346974469754697646977469784697946980469814698246983469844698546986469874698846989469904699146992469934699446995469964699746998469994700047001470024700347004470054700647007470084700947010470114701247013470144
981359814598155981659817598185981959820598215982259823598245982559826598275982859829598305983159832598335983459835598365983759838598395984059841598425984359844598455984659847598485984959850598515985259853598545985559856598575985859859598605986159862598635986459865598665986759868598695987059871598725987359874598755987659877598785987959880598815988259883598845988559886598875988859889598905989159892598935989459895598965989759898598995990059901599025990359904599055990659907599085990959910599115991259913599145991559916599175991859919599205992159922599235992459925599265992759928599295993059931599325993359934599355993659937599385993959940599415994259943599445994559946599475994859949599505995159952599535995459955599565995759958599595996059961599625996359964599655996659967599685996959970599715997259973599745997559976599775997859979599805998159982599835998459985599865998759988599895999059991599925999359994599955999659997599985999960000600016000260003600046000560006600076000860009600106001160012600136001460015600166001760018600196002060021600226002360024600256002660027600286002960030600316003260033600346003560036600376003860039600406004160042600436004460045600466004760048600496005060051600526005360054600556005660057600586005960060600616006260063600646006560066600676006860069600706007160072600736007460075600766007760078600796008060081600826008360084600856008660087600886008960090600916009260093600946009560096600976009860099601006010160102601036010460105601066010760108601096011060111601126011360114601156011660117601186011960120601216012260123601246012560126601276012860129601306013160132601336013460135601366013760138601396014060141601426014360144601456014660147601486014960150601516015260153601546015560156601576015860159601606016160162601636016460165601666016760168601696017060171601726017360174601756017660177601786017960180601816018260183601846018560186601876018860189601906019160192601936019460195601966019760198601996020060201602026020360204602056020660207602086020960210602116021260213602146021560216602176021860219602206022160222602236022460225602266022760228602296023060231602326023360234602356023660237602386023960240602416024260243602446024560246602476024860249602506025160252602536025460255602566025760258602596026060261602626026360264602656026660267602686026960270602716027260273602746027560276602776027860279602806028160282602836028460285602866028760288602896029060291602926029360294602956029660297602986029960300603016030260303603046030560306603076030860309603106031160312603136031460315603166031760318603196032060321603226032360324603256032660327603286032960330603316033260333603346033560336603376033860339603406034160342603436034460345603466034760348603496035060351603526035360354603556035660357603586035960360603616036260363603646036560366603676036860369603706037160372603736037460375603766037760378603796038060381603826038360384603856038660387603886038960390603916039260393603946039560396603976039860399604006040160402604036040460405604066040760408604096041060411604126041360414604156041660417604186041960420604216042260423604246042560426604276042860429604306043160432604336043460435604366043760438604396044060441604426044360444604456044660447604486044960450604516045260453604546045560456604576045860459604606046160462604636046460465604666046760468604696047060471604726047360474604756047660477604786047960480604816048260483604846048560486604876048860489604906049160492604936049460495604966049760498604996050060501605026050360504605056050660507605086050960510605116051260513605146051560516605176051860519605206052160522605236
052460525605266052760528605296053060531605326053360534605356053660537605386053960540605416054260543605446054560546605476054860549605506055160552605536055460555605566055760558605596056060561605626056360564605656056660567605686056960570605716057260573605746057560576605776057860579605806058160582605836058460585605866058760588605896059060591605926059360594605956059660597605986059960600606016060260603606046060560606606076060860609606106061160612606136061460615606166061760618606196062060621606226062360624606256062660627606286062960630606316063260633606346063560636606376063860639606406064160642606436064460645606466064760648606496065060651606526065360654606556065660657606586065960660606616066260663606646066560666606676066860669606706067160672606736067460675606766067760678606796068060681606826068360684606856068660687606886068960690606916069260693606946069560696606976069860699607006070160702607036070460705607066070760708607096071060711607126071360714607156071660717607186071960720607216072260723607246072560726607276072860729607306073160732607336073460735607366073760738607396074060741607426074360744607456074660747607486074960750607516075260753607546075560756607576075860759607606076160762607636076460765607666076760768607696077060771607726077360774607756077660777607786077960780607816078260783607846078560786607876078860789607906079160792607936079460795607966079760798607996080060801608026080360804608056080660807608086080960810608116081260813608146081560816608176081860819608206082160822608236082460825608266082760828608296083060831608326083360834608356083660837608386083960840608416084260843608446084560846608476084860849608506085160852608536085460855608566085760858608596086060861608626086360864608656086660867608686086960870608716087260873608746087560876608776087860879608806088160882608836088460885608866088760888608896089060891608926089360894608956089660897608986089960900609016090260903609046090560906609076090860909609106091160912609136091460915609166091760918609196092060921609226092360924609256092660927609286092960930609316093260933609346093560936609376093860939609406094160942609436094460945609466094760948609496095060951609526095360954609556095660957609586095960960609616096260963609646096560966609676096860969609706097160972609736097460975609766097760978609796098060981609826098360984609856098660987609886098960990609916099260993609946099560996609976099860999610006100161002610036100461005610066100761008610096101061011610126101361014610156101661017610186101961020610216102261023610246102561026610276102861029610306103161032610336103461035610366103761038610396104061041610426104361044610456104661047610486104961050610516105261053610546105561056610576105861059610606106161062610636106461065610666106761068610696107061071610726107361074610756107661077610786107961080610816108261083610846108561086610876108861089610906109161092610936109461095610966109761098610996110061101611026110361104611056110661107611086110961110611116111261113611146111561116611176111861119611206112161122611236112461125611266112761128611296113061131611326113361134611356113661137611386113961140611416114261143611446114561146611476114861149611506115161152611536115461155611566115761158611596116061161611626116361164611656116661167611686116961170611716117261173611746117561176611776117861179611806118161182611836118461185611866118761188611896119061191611926119361194611956119661197611986119961200612016120261203612046120561206612076120861209612106121161212612136121461215612166121761218612196122061221612226122361224612256122661227612286122961230612316123261233612346
123561236612376123861239612406124161242612436124461245612466124761248612496125061251612526125361254612556125661257612586125961260612616126261263612646126561266612676126861269612706127161272612736127461275612766127761278612796128061281612826128361284612856128661287612886128961290612916129261293612946129561296612976129861299613006130161302613036130461305613066130761308613096131061311613126131361314613156131661317613186131961320613216132261323613246132561326613276132861329613306133161332613336133461335613366133761338613396134061341613426134361344613456134661347613486134961350613516135261353613546135561356613576135861359613606136161362613636136461365613666136761368613696137061371613726137361374613756137661377613786137961380613816138261383613846138561386613876138861389613906139161392613936139461395613966139761398613996140061401614026140361404614056140661407614086140961410614116141261413614146141561416614176141861419614206142161422614236142461425614266142761428614296143061431614326143361434614356143661437614386143961440614416144261443614446144561446614476144861449614506145161452614536145461455614566145761458614596146061461614626146361464614656146661467614686146961470614716147261473614746147561476614776147861479614806148161482614836148461485614866148761488614896149061491614926149361494614956149661497614986149961500615016150261503615046150561506615076150861509615106151161512615136151461515615166151761518615196152061521615226152361524615256152661527615286152961530615316153261533615346153561536615376153861539615406154161542615436154461545615466154761548615496155061551615526155361554615556155661557615586155961560615616156261563615646156561566615676156861569615706157161572615736157461575615766157761578615796158061581615826158361584615856158661587615886158961590615916159261593615946159561596615976159861599616006160161602616036160461605616066160761608616096161061611616126161361614616156161661617616186161961620616216162261623616246162561626616276162861629616306163161632616336163461635616366163761638616396164061641616426164361644616456164661647616486164961650616516165261653616546165561656616576165861659616606166161662616636166461665616666166761668616696167061671616726167361674616756167661677616786167961680616816168261683616846168561686616876168861689616906169161692616936169461695616966169761698616996170061701617026170361704617056170661707617086170961710617116171261713617146171561716617176171861719617206172161722617236172461725617266172761728617296173061731617326173361734617356173661737617386173961740617416174261743617446174561746617476174861749617506175161752617536175461755617566175761758617596176061761617626176361764617656176661767617686176961770617716177261773617746177561776617776177861779617806178161782617836178461785617866178761788617896179061791617926179361794617956179661797617986179961800618016180261803618046180561806618076180861809618106181161812618136181461815618166181761818618196182061821618226182361824618256182661827618286182961830618316183261833618346183561836618376183861839618406184161842618436184461845618466184761848618496185061851618526185361854618556185661857618586185961860618616186261863618646186561866618676186861869618706187161872618736187461875618766187761878618796188061881618826188361884618856188661887618886188961890618916189261893618946189561896618976189861899619006190161902619036190461905619066190761908619096191061911619126191361914619156191661917619186191961920619216192261923619246192561926619276192861929619306193161932619336193461935619366193761938619396194061941619426194361944619456
194661947619486194961950619516195261953619546195561956619576195861959619606196161962619636196461965619666196761968619696197061971619726197361974619756197661977619786197961980619816198261983619846198561986619876198861989619906199161992619936199461995619966199761998619996200062001620026200362004620056200662007620086200962010620116201262013620146201562016620176201862019620206202162022620236202462025620266202762028620296203062031620326203362034620356203662037620386203962040620416204262043620446204562046620476204862049620506205162052620536205462055620566205762058620596206062061620626206362064620656206662067620686206962070620716207262073620746207562076620776207862079620806208162082620836208462085620866208762088620896209062091620926209362094620956209662097620986209962100621016210262103621046210562106621076210862109621106211162112621136211462115621166211762118621196212062121621226212362124621256212662127621286212962130621316213262133621346213562136621376213862139621406214162142621436214462145621466214762148621496215062151621526215362154621556215662157621586215962160621616216262163621646216562166621676216862169621706217162172621736217462175621766217762178621796218062181621826218362184621856218662187621886218962190621916219262193621946219562196621976219862199622006220162202622036220462205622066220762208622096221062211622126221362214622156221662217622186221962220622216222262223622246222562226622276222862229622306223162232622336223462235622366223762238622396224062241622426224362244622456224662247622486224962250622516225262253622546225562256622576225862259622606226162262622636226462265622666226762268622696227062271622726227362274622756227662277622786227962280622816228262283622846228562286622876228862289622906229162292622936229462295622966229762298622996230062301623026230362304623056230662307623086230962310623116231262313623146231562316623176231862319623206232162322623236232462325623266232762328623296233062331623326233362334623356233662337623386233962340623416234262343623446234562346623476234862349623506235162352623536235462355623566235762358623596236062361623626236362364623656236662367623686236962370623716237262373623746237562376623776237862379623806238162382623836238462385623866238762388623896239062391623926239362394623956239662397623986239962400624016240262403624046240562406624076240862409624106241162412624136241462415624166241762418624196242062421624226242362424624256242662427624286242962430624316243262433624346243562436624376243862439624406244162442624436244462445624466244762448624496245062451624526245362454624556245662457624586245962460624616246262463624646246562466624676246862469624706247162472624736247462475624766247762478624796248062481624826248362484624856248662487624886248962490624916249262493624946249562496624976249862499625006250162502625036250462505625066250762508625096251062511625126251362514625156251662517625186251962520625216252262523625246252562526625276252862529625306253162532625336253462535625366253762538625396254062541625426254362544625456254662547625486254962550625516255262553625546255562556625576255862559625606256162562625636256462565625666256762568625696257062571625726257362574625756257662577625786257962580625816258262583625846258562586625876258862589625906259162592625936259462595625966259762598625996260062601626026260362604626056260662607626086260962610626116261262613626146261562616626176261862619626206262162622626236262462625626266262762628626296263062631626326263362634626356263662637626386263962640626416264262643626446264562646626476264862649626506265162652626536265462655626566
265762658626596266062661626626266362664626656266662667626686266962670626716267262673626746267562676626776267862679626806268162682626836268462685626866268762688626896269062691626926269362694626956269662697626986269962700627016270262703627046270562706627076270862709627106271162712627136271462715627166271762718627196272062721627226272362724627256272662727627286272962730627316273262733627346273562736627376273862739627406274162742627436274462745627466274762748627496275062751627526275362754627556275662757627586275962760627616276262763627646276562766627676276862769627706277162772627736277462775627766277762778627796278062781627826278362784627856278662787627886278962790627916279262793627946279562796627976279862799628006280162802628036280462805628066280762808628096281062811628126281362814628156281662817628186281962820628216282262823628246282562826628276282862829628306283162832628336283462835628366283762838628396284062841628426284362844628456284662847628486284962850628516285262853628546285562856628576285862859628606286162862628636286462865628666286762868628696287062871628726287362874628756287662877628786287962880628816288262883628846288562886628876288862889628906289162892628936289462895628966289762898628996290062901629026290362904629056290662907629086290962910629116291262913629146291562916629176291862919629206292162922629236292462925629266292762928629296293062931629326293362934629356293662937629386293962940629416294262943629446294562946629476294862949629506295162952629536295462955629566295762958629596296062961629626296362964629656296662967629686296962970629716297262973629746297562976629776297862979629806298162982629836298462985629866298762988629896299062991629926299362994629956299662997629986299963000630016300263003630046300563006630076300863009630106301163012630136301463015630166301763018630196302063021630226302363024630256302663027630286302963030630316303263033630346303563036630376303863039630406304163042630436304463045630466304763048630496305063051630526305363054630556305663057630586305963060630616306263063630646306563066630676306863069630706307163072630736307463075630766307763078630796308063081630826308363084630856308663087630886308963090630916309263093630946309563096630976309863099631006310163102631036310463105631066310763108631096311063111631126311363114631156311663117631186311963120631216312263123631246312563126631276312863129631306313163132631336313463135631366313763138631396314063141631426314363144631456314663147631486314963150631516315263153631546315563156631576315863159631606316163162631636316463165631666316763168631696317063171631726317363174631756317663177631786317963180631816318263183631846318563186631876318863189631906319163192631936319463195631966319763198631996320063201632026320363204632056320663207632086320963210632116321263213632146321563216632176321863219632206322163222632236322463225632266322763228632296323063231632326323363234632356323663237632386323963240632416324263243632446324563246632476324863249632506325163252632536325463255632566325763258632596326063261632626326363264632656326663267632686326963270632716327263273632746327563276632776327863279632806328163282632836328463285632866328763288632896329063291632926329363294632956329663297632986329963300633016330263303633046330563306633076330863309633106331163312633136331463315633166331763318633196332063321633226332363324633256332663327633286332963330633316333263333633346333563336633376333863339633406334163342633436334463345633466334763348633496335063351633526335363354633556335663357633586335963360633616336263363633646336563366633676
336863369633706337163372633736337463375633766337763378633796338063381633826338363384633856338663387633886338963390633916339263393633946339563396633976339863399634006340163402634036340463405634066340763408634096341063411634126341363414634156341663417634186341963420634216342263423634246342563426634276342863429634306343163432634336343463435634366343763438634396344063441634426344363444634456344663447634486344963450634516345263453634546345563456634576345863459634606346163462634636346463465634666346763468634696347063471634726347363474634756347663477634786347963480634816348263483634846348563486634876348863489634906349163492634936349463495634966349763498634996350063501635026350363504635056350663507635086350963510635116351263513635146351563516635176351863519635206352163522635236352463525635266352763528635296353063531635326353363534635356353663537635386353963540635416354263543635446354563546635476354863549635506355163552635536355463555635566355763558635596356063561635626356363564635656356663567635686356963570635716357263573635746357563576635776357863579635806358163582635836358463585635866358763588635896359063591635926359363594635956359663597635986359963600636016360263603636046360563606636076360863609636106361163612636136361463615636166361763618636196362063621636226362363624636256362663627636286362963630636316363263633636346363563636636376363863639636406364163642636436364463645636466364763648636496365063651636526365363654636556365663657636586365963660636616366263663636646366563666636676366863669636706367163672636736367463675636766367763678636796368063681636826368363684636856368663687636886368963690636916369263693636946369563696636976369863699637006370163702637036370463705637066370763708637096371063711637126371363714637156371663717637186371963720637216372263723637246372563726637276372863729637306373163732637336373463735637366373763738637396374063741637426374363744637456374663747637486374963750637516375263753637546375563756637576375863759637606376163762637636376463765637666376763768637696377063771637726377363774637756377663777637786377963780637816378263783637846378563786637876378863789637906379163792637936379463795637966379763798637996380063801638026380363804638056380663807638086380963810638116381263813638146381563816638176381863819638206382163822638236382463825638266382763828638296383063831638326383363834638356383663837638386383963840638416384263843638446384563846638476384863849638506385163852638536385463855638566385763858638596386063861638626386363864638656386663867638686386963870638716387263873638746387563876638776387863879638806388163882638836388463885638866388763888638896389063891638926389363894638956389663897638986389963900639016390263903639046390563906639076390863909639106391163912639136391463915639166391763918639196392063921639226392363924639256392663927639286392963930639316393263933639346393563936639376393863939639406394163942639436394463945639466394763948639496395063951639526395363954639556395663957639586395963960639616396263963639646396563966639676396863969639706397163972639736397463975639766397763978639796398063981639826398363984639856398663987639886398963990639916399263993639946399563996639976399863999640006400164002640036400464005640066400764008640096401064011640126401364014640156401664017640186401964020640216402264023640246402564026640276402864029640306403164032640336403464035640366403764038640396404064041640426404364044640456404664047640486404964050640516405264053640546405564056640576405864059640606406164062640636406464065640666406764068640696407064071640726407364074640756407664077640786
407964080640816408264083640846408564086640876408864089640906409164092640936409464095640966409764098640996410064101641026410364104641056410664107641086410964110641116411264113641146411564116641176411864119641206412164122641236412464125641266412764128641296413064131641326413364134641356413664137641386413964140641416414264143641446414564146641476414864149641506415164152641536415464155641566415764158641596416064161641626416364164641656416664167641686416964170641716417264173641746417564176641776417864179641806418164182641836418464185641866418764188641896419064191641926419364194641956419664197641986419964200642016420264203642046420564206642076420864209642106421164212642136421464215642166421764218642196422064221642226422364224642256422664227642286422964230642316423264233642346423564236642376423864239642406424164242642436424464245642466424764248642496425064251642526425364254642556425664257642586425964260642616426264263642646426564266642676426864269642706427164272642736427464275642766427764278642796428064281642826428364284642856428664287642886428964290642916429264293642946429564296642976429864299643006430164302643036430464305643066430764308643096431064311643126431364314643156431664317643186431964320643216432264323643246432564326643276432864329643306433164332643336433464335643366433764338643396434064341643426434364344643456434664347643486434964350643516435264353643546435564356643576435864359643606436164362643636436464365643666436764368643696437064371643726437364374643756437664377643786437964380643816438264383643846438564386643876438864389643906439164392643936439464395643966439764398643996440064401644026440364404644056440664407644086440964410644116441264413644146441564416644176441864419644206442164422644236442464425644266442764428644296443064431644326443364434644356443664437644386443964440644416444264443644446444564446644476444864449644506445164452644536445464455644566445764458644596446064461644626446364464644656446664467644686446964470644716447264473644746447564476644776447864479644806448164482644836448464485644866448764488644896449064491644926449364494644956449664497644986449964500645016450264503645046450564506645076450864509645106451164512645136451464515645166451764518645196452064521645226452364524645256452664527645286452964530645316453264533645346453564536645376453864539645406454164542645436454464545645466454764548645496455064551645526455364554645556455664557645586455964560645616456264563645646456564566645676456864569645706457164572645736457464575645766457764578645796458064581645826458364584645856458664587645886458964590645916459264593645946459564596645976459864599646006460164602646036460464605646066460764608646096461064611646126461364614646156461664617646186461964620646216462264623646246462564626646276462864629646306463164632646336463464635646366463764638646396464064641646426464364644646456464664647646486464964650646516465264653646546465564656646576465864659646606466164662646636466464665646666466764668646696467064671646726467364674646756467664677646786467964680646816468264683646846468564686646876468864689646906469164692646936469464695646966469764698646996470064701647026470364704647056470664707647086470964710647116471264713647146471564716647176471864719647206472164722647236472464725647266472764728647296473064731647326473364734647356473664737647386473964740647416474264743647446474564746647476474864749647506475164752647536475464755647566475764758647596476064761647626476364764647656476664767647686476964770647716477264773647746477564776647776477864779647806478164782647836478464785647866478764788647896
479064791647926479364794647956479664797647986479964800648016480264803648046480564806648076480864809648106481164812648136481464815648166481764818648196482064821648226482364824648256482664827648286482964830648316483264833648346483564836648376483864839648406484164842648436484464845648466484764848648496485064851648526485364854648556485664857648586485964860648616486264863648646486564866648676486864869648706487164872648736487464875648766487764878648796488064881648826488364884648856488664887648886488964890648916489264893648946489564896648976489864899649006490164902649036490464905649066490764908649096491064911649126491364914649156491664917649186491964920649216492264923649246492564926649276492864929649306493164932649336493464935649366493764938649396494064941649426494364944649456494664947649486494964950649516495264953649546495564956649576495864959649606496164962649636496464965649666496764968649696497064971649726497364974649756497664977649786497964980649816498264983649846498564986649876498864989649906499164992649936499464995649966499764998649996500065001650026500365004650056500665007650086500965010650116501265013650146501565016650176501865019650206502165022650236502465025650266502765028650296503065031650326503365034650356503665037650386503965040650416504265043650446504565046650476504865049650506505165052650536505465055650566505765058650596506065061650626506365064650656506665067650686506965070650716507265073650746507565076650776507865079650806508165082650836508465085650866508765088650896509065091650926509365094650956509665097650986509965100651016510265103651046510565106651076510865109651106511165112651136511465115651166511765118651196512065121651226512365124651256512665127651286512965130651316513265133651346513565136651376513865139651406514165142651436514465145651466514765148651496515065151651526515365154651556515665157651586515965160651616516265163651646516565166651676516865169651706517165172651736517465175651766517765178651796518065181651826518365184651856518665187651886518965190651916519265193651946519565196651976519865199652006520165202652036520465205652066520765208652096521065211652126521365214652156521665217652186521965220652216522265223652246522565226652276522865229652306523165232652336523465235652366523765238652396524065241652426524365244652456524665247652486524965250652516525265253652546525565256652576525865259652606526165262652636526465265652666526765268652696527065271652726527365274652756527665277652786527965280652816528265283652846528565286652876528865289652906529165292652936529465295652966529765298652996530065301653026530365304653056530665307653086530965310653116531265313653146531565316653176531865319653206532165322653236532465325653266532765328653296533065331653326533365334653356533665337653386533965340653416534265343653446534565346653476534865349653506535165352653536535465355653566535765358653596536065361653626536365364653656536665367653686536965370653716537265373653746537565376653776537865379653806538165382653836538465385653866538765388653896539065391653926539365394653956539665397653986539965400654016540265403654046540565406654076540865409654106541165412654136541465415654166541765418654196542065421654226542365424654256542665427654286542965430654316543265433654346543565436654376543865439654406544165442654436544465445654466544765448654496545065451654526545365454654556545665457654586545965460654616546265463654646546565466654676546865469654706547165472654736547465475654766547765478654796548065481654826548365484654856548665487654886548965490654916549265493654946549565496654976549865499655006
550165502655036550465505655066550765508655096551065511655126551365514655156551665517655186551965520655216552265523655246552565526655276552865529655306553165532655336553465535655366553765538655396554065541655426554365544655456554665547655486554965550655516555265553655546555565556655576555865559655606556165562655636556465565655666556765568655696557065571655726557365574655756557665577655786557965580655816558265583655846558565586655876558865589655906559165592655936559465595655966559765598655996560065601656026560365604656056560665607656086560965610656116561265613656146561565616656176561865619656206562165622656236562465625656266562765628656296563065631656326563365634656356563665637656386563965640656416564265643656446564565646656476564865649656506565165652656536565465655656566565765658656596566065661656626566365664656656566665667656686566965670656716567265673656746567565676656776567865679656806568165682656836568465685656866568765688656896569065691656926569365694656956569665697656986569965700657016570265703657046570565706657076570865709657106571165712657136571465715657166571765718657196572065721657226572365724657256572665727657286572965730657316573265733657346573565736657376573865739657406574165742657436574465745657466574765748657496575065751657526575365754657556575665757657586575965760657616576265763657646576565766657676576865769657706577165772657736577465775657766577765778657796578065781657826578365784657856578665787657886578965790657916579265793657946579565796657976579865799658006580165802658036580465805658066580765808658096581065811658126581365814658156581665817658186581965820658216582265823658246582565826658276582865829658306583165832658336583465835658366583765838658396584065841658426584365844658456584665847658486584965850658516585265853658546585565856658576585865859658606586165862658636586465865658666586765868658696587065871658726587365874658756587665877658786587965880658816588265883658846588565886658876588865889658906589165892658936589465895658966589765898658996590065901659026590365904659056590665907659086590965910659116591265913659146591565916659176591865919659206592165922659236592465925659266592765928659296593065931659326593365934659356593665937659386593965940659416594265943659446594565946659476594865949659506595165952659536595465955659566595765958659596596065961659626596365964659656596665967659686596965970659716597265973659746597565976659776597865979659806598165982659836598465985659866598765988659896599065991659926599365994659956599665997659986599966000660016600266003660046600566006660076600866009660106601166012660136601466015660166601766018660196602066021660226602366024660256602666027660286602966030660316603266033660346603566036660376603866039660406604166042660436604466045660466604766048660496605066051660526605366054660556605666057660586605966060660616606266063660646606566066660676606866069660706607166072660736607466075660766607766078660796608066081660826608366084660856608666087660886608966090660916609266093660946609566096660976609866099661006610166102661036610466105661066610766108661096611066111661126611366114661156611666117661186611966120661216612266123661246612566126661276612866129661306613166132661336613466135661366613766138661396614066141661426614366144661456614666147661486614966150661516615266153661546615566156661576615866159661606616166162661636616466165661666616766168661696617066171661726617366174661756617666177661786617966180661816618266183661846618566186661876618866189661906619166192661936619466195661966619766198661996620066201662026620366204662056620666207662086620966210662116
621266213662146621566216662176621866219662206622166222662236622466225662266622766228662296623066231662326623366234662356623666237662386623966240662416624266243662446624566246662476624866249662506625166252662536625466255662566625766258662596626066261662626626366264662656626666267662686626966270662716627266273662746627566276662776627866279662806628166282662836628466285662866628766288662896629066291662926629366294662956629666297662986629966300663016630266303663046630566306663076630866309663106631166312663136631466315663166631766318663196632066321663226632366324663256632666327663286632966330663316633266333663346633566336663376633866339663406634166342663436634466345663466634766348663496635066351663526635366354663556635666357663586635966360663616636266363663646636566366663676636866369663706637166372663736637466375663766637766378663796638066381663826638366384663856638666387663886638966390663916639266393663946639566396663976639866399664006640166402664036640466405664066640766408664096641066411664126641366414664156641666417664186641966420664216642266423664246642566426664276642866429664306643166432664336643466435664366643766438664396644066441664426644366444664456644666447664486644966450664516645266453664546645566456664576645866459664606646166462664636646466465664666646766468664696647066471664726647366474664756647666477664786647966480664816648266483664846648566486664876648866489664906649166492664936649466495664966649766498664996650066501665026650366504665056650666507665086650966510665116651266513665146651566516665176651866519665206652166522665236652466525665266652766528665296653066531665326653366534665356653666537665386653966540665416654266543665446654566546665476654866549665506655166552665536655466555665566655766558665596656066561665626656366564665656656666567665686656966570665716657266573665746657566576665776657866579665806658166582665836658466585665866658766588665896659066591665926659366594665956659666597665986659966600666016660266603666046660566606666076660866609666106661166612666136661466615666166661766618666196662066621666226662366624666256662666627666286662966630666316663266633666346663566636666376663866639666406664166642666436664466645666466664766648666496665066651666526665366654666556665666657666586665966660666616666266663666646666566666666676666866669666706667166672666736667466675666766667766678666796668066681666826668366684666856668666687666886668966690666916669266693666946669566696666976669866699667006670166702667036670466705667066670766708667096671066711667126671366714667156671666717667186671966720667216672266723667246672566726667276672866729667306673166732667336673466735667366673766738667396674066741667426674366744667456674666747667486674966750667516675266753667546675566756667576675866759667606676166762667636676466765667666676766768667696677066771667726677366774667756677666777667786677966780667816678266783667846678566786667876678866789667906679166792667936679466795667966679766798667996680066801668026680366804668056680666807668086680966810668116681266813668146681566816668176681866819668206682166822668236682466825668266682766828668296683066831668326683366834668356683666837668386683966840668416684266843668446684566846668476684866849668506685166852668536685466855668566685766858668596686066861668626686366864668656686666867668686686966870668716687266873668746687566876668776687866879668806688166882668836688466885668866688766888668896689066891668926689366894668956689666897668986689966900669016690266903669046690566906669076690866909669106691166912669136691466915669166691766918669196692066921669226
692366924669256692666927669286692966930669316693266933669346693566936669376693866939669406694166942669436694466945669466694766948669496695066951669526695366954669556695666957669586695966960669616696266963669646696566966669676696866969669706697166972669736697466975669766697766978669796698066981669826698366984669856698666987669886698966990669916699266993669946699566996669976699866999670006700167002670036700467005670066700767008670096701067011670126701367014670156701667017670186701967020670216702267023670246702567026670276702867029670306703167032670336703467035670366703767038670396704067041670426704367044670456704667047670486704967050670516705267053670546705567056670576705867059670606706167062670636706467065670666706767068670696707067071670726707367074670756707667077670786707967080670816708267083670846708567086670876708867089670906709167092670936709467095670966709767098670996710067101671026710367104671056710667107671086710967110671116711267113671146711567116671176711867119671206712167122671236712467125671266712767128671296713067131671326713367134671356713667137671386713967140671416714267143671446714567146671476714867149671506715167152671536715467155671566715767158671596716067161671626716367164671656716667167671686716967170671716717267173671746717567176671776717867179671806718167182671836718467185671866718767188671896719067191671926719367194671956719667197671986719967200672016720267203672046720567206672076720867209672106721167212672136721467215672166721767218672196722067221672226722367224672256722667227672286722967230672316723267233672346723567236672376723867239672406724167242672436724467245672466724767248672496725067251672526725367254672556725667257672586725967260672616726267263672646726567266672676726867269672706727167272672736727467275672766727767278672796728067281672826728367284672856728667287672886728967290672916729267293672946729567296672976729867299673006730167302673036730467305673066730767308673096731067311673126731367314673156731667317673186731967320673216732267323673246732567326673276732867329673306733167332673336733467335673366733767338673396734067341673426734367344673456734667347673486734967350673516735267353673546735567356673576735867359673606736167362673636736467365673666736767368673696737067371673726737367374673756737667377673786737967380673816738267383673846738567386673876738867389673906739167392673936739467395673966739767398673996740067401674026740367404674056740667407674086740967410674116741267413674146741567416674176741867419674206742167422674236742467425674266742767428674296743067431674326743367434674356743667437674386743967440674416744267443674446744567446674476744867449674506745167452674536745467455674566745767458674596746067461674626746367464674656746667467674686746967470674716747267473674746747567476674776747867479674806748167482674836748467485674866748767488674896749067491674926749367494674956749667497674986749967500675016750267503675046750567506675076750867509675106751167512675136751467515675166751767518675196752067521675226752367524675256752667527675286752967530675316753267533675346753567536675376753867539675406754167542675436754467545675466754767548675496755067551675526755367554675556755667557675586755967560675616756267563675646756567566675676756867569675706757167572675736757467575675766757767578675796758067581675826758367584675856758667587675886758967590675916759267593675946759567596675976759867599676006760167602676036760467605676066760767608676096761067611676126761367614676156761667617676186761967620676216762267623676246762567626676276762867629676306763167632676336
763467635676366763767638676396764067641676426764367644676456764667647676486764967650676516765267653676546765567656676576765867659676606766167662676636766467665676666766767668676696767067671676726767367674676756767667677676786767967680676816768267683676846768567686676876768867689676906769167692676936769467695676966769767698676996770067701677026770367704677056770667707677086770967710677116771267713677146771567716677176771867719677206772167722677236772467725677266772767728677296773067731677326773367734677356773667737677386773967740677416774267743677446774567746677476774867749677506775167752677536775467755677566775767758677596776067761677626776367764677656776667767677686776967770677716777267773677746777567776677776777867779677806778167782677836778467785677866778767788677896779067791677926779367794677956779667797677986779967800678016780267803678046780567806678076780867809678106781167812678136781467815678166781767818678196782067821678226782367824678256782667827678286782967830678316783267833678346783567836678376783867839678406784167842678436784467845678466784767848678496785067851678526785367854678556785667857678586785967860678616786267863678646786567866678676786867869678706787167872678736787467875678766787767878678796788067881678826788367884678856788667887678886788967890678916789267893678946789567896678976789867899679006790167902679036790467905679066790767908679096791067911679126791367914679156791667917679186791967920679216792267923679246792567926679276792867929679306793167932679336793467935679366793767938679396794067941679426794367944679456794667947679486794967950679516795267953679546795567956679576795867959679606796167962679636796467965679666796767968679696797067971679726797367974679756797667977679786797967980679816798267983679846798567986679876798867989679906799167992679936799467995679966799767998679996800068001680026800368004680056800668007680086800968010680116801268013680146801568016680176801868019680206802168022680236802468025680266802768028680296803068031680326803368034680356803668037680386803968040680416804268043680446804568046680476804868049680506805168052680536805468055680566805768058680596806068061680626806368064680656806668067680686806968070680716807268073680746807568076680776807868079680806808168082680836808468085680866808768088680896809068091680926809368094680956809668097680986809968100681016810268103681046810568106681076810868109681106811168112681136811468115681166811768118681196812068121681226812368124681256812668127681286812968130681316813268133681346813568136681376813868139681406814168142681436814468145681466814768148681496815068151681526815368154681556815668157681586815968160681616816268163681646816568166681676816868169681706817168172681736817468175681766817768178681796818068181681826818368184681856818668187681886818968190681916819268193681946819568196681976819868199682006820168202682036820468205682066820768208682096821068211682126821368214682156821668217682186821968220682216822268223682246822568226682276822868229682306823168232682336823468235682366823768238682396824068241682426824368244682456824668247682486824968250682516825268253682546825568256682576825868259682606826168262682636826468265682666826768268682696827068271682726827368274682756827668277682786827968280682816828268283682846828568286682876828868289682906829168292682936829468295682966829768298682996830068301683026830368304683056830668307683086830968310683116831268313683146831568316683176831868319683206832168322683236832468325683266832768328683296833068331683326833368334683356833668337683386833968340683416834268343683446
834568346683476834868349683506835168352683536835468355683566835768358683596836068361683626836368364683656836668367683686836968370683716837268373683746837568376683776837868379683806838168382683836838468385683866838768388683896839068391683926839368394683956839668397683986839968400684016840268403684046840568406684076840868409684106841168412684136841468415684166841768418684196842068421684226842368424684256842668427684286842968430684316843268433684346843568436684376843868439684406844168442684436844468445684466844768448684496845068451684526845368454684556845668457684586845968460684616846268463684646846568466684676846868469684706847168472684736847468475684766847768478684796848068481684826848368484684856848668487684886848968490684916849268493684946849568496684976849868499685006850168502685036850468505685066850768508685096851068511685126851368514685156851668517685186851968520685216852268523685246852568526685276852868529685306853168532685336853468535685366853768538685396854068541685426854368544685456854668547685486854968550685516855268553685546855568556685576855868559685606856168562685636856468565685666856768568685696857068571685726857368574685756857668577685786857968580685816858268583685846858568586685876858868589685906859168592685936859468595685966859768598685996860068601686026860368604686056860668607686086860968610686116861268613686146861568616686176861868619686206862168622686236862468625686266862768628686296863068631686326863368634686356863668637686386863968640686416864268643686446864568646686476864868649686506865168652686536865468655686566865768658686596866068661686626866368664686656866668667686686866968670686716867268673686746867568676686776867868679686806868168682686836868468685686866868768688686896869068691686926869368694686956869668697686986869968700687016870268703687046870568706687076870868709687106871168712687136871468715687166871768718687196872068721687226872368724687256872668727687286872968730687316873268733687346873568736687376873868739687406874168742687436874468745687466874768748687496875068751687526875368754687556875668757687586875968760687616876268763687646876568766687676876868769687706877168772687736877468775687766877768778687796878068781687826878368784687856878668787687886878968790687916879268793687946879568796687976879868799688006880168802688036880468805688066880768808688096881068811688126881368814688156881668817688186881968820688216882268823688246882568826688276882868829688306883168832688336883468835688366883768838688396884068841688426884368844688456884668847688486884968850688516885268853688546885568856688576885868859688606886168862688636886468865688666886768868688696887068871688726887368874688756887668877688786887968880688816888268883688846888568886688876888868889688906889168892688936889468895688966889768898688996890068901689026890368904689056890668907689086890968910689116891268913689146891568916689176891868919689206892168922689236892468925689266892768928689296893068931689326893368934689356893668937689386893968940689416894268943689446894568946689476894868949689506895168952689536895468955689566895768958689596896068961689626896368964689656896668967689686896968970689716897268973689746897568976689776897868979689806898168982689836898468985689866898768988689896899068991689926899368994689956899668997689986899969000690016900269003690046900569006690076900869009690106901169012690136901469015690166901769018690196902069021690226902369024690256902669027690286902969030690316903269033690346903569036690376903869039690406904169042690436904469045690466904769048690496905069051690526905369054690556
905669057690586905969060690616906269063690646906569066690676906869069690706907169072690736907469075690766907769078690796908069081690826908369084690856908669087690886908969090690916909269093690946909569096690976909869099691006910169102691036910469105691066910769108691096911069111691126911369114691156911669117691186911969120691216912269123691246912569126691276912869129691306913169132691336913469135691366913769138691396914069141691426914369144691456914669147691486914969150691516915269153691546915569156691576915869159691606916169162691636916469165691666916769168691696917069171691726917369174691756917669177691786917969180691816918269183691846918569186691876918869189691906919169192691936919469195691966919769198691996920069201692026920369204692056920669207692086920969210692116921269213692146921569216692176921869219692206922169222692236922469225692266922769228692296923069231692326923369234692356923669237692386923969240692416924269243692446924569246692476924869249692506925169252692536925469255692566925769258692596926069261692626926369264692656926669267692686926969270692716927269273692746927569276692776927869279692806928169282692836928469285692866928769288692896929069291692926929369294692956929669297692986929969300693016930269303693046930569306693076930869309693106931169312693136931469315693166931769318693196932069321693226932369324693256932669327693286932969330693316933269333693346933569336693376933869339693406934169342693436934469345693466934769348693496935069351693526935369354693556935669357693586935969360693616936269363693646936569366693676936869369693706937169372693736937469375693766937769378693796938069381693826938369384693856938669387693886938969390693916939269393693946939569396693976939869399694006940169402694036940469405694066940769408694096941069411694126941369414694156941669417694186941969420694216942269423694246942569426694276942869429694306943169432694336943469435694366943769438694396944069441694426944369444694456944669447694486944969450694516945269453694546945569456694576945869459694606946169462694636946469465694666946769468694696947069471694726947369474694756947669477694786947969480694816948269483694846948569486694876948869489694906949169492694936949469495694966949769498694996950069501695026950369504695056950669507695086950969510695116951269513695146951569516695176951869519695206952169522695236952469525695266952769528695296953069531695326953369534695356953669537695386953969540695416954269543695446954569546695476954869549695506955169552695536955469555695566955769558695596956069561695626956369564695656956669567695686956969570695716957269573695746957569576695776957869579695806958169582695836958469585695866958769588695896959069591695926959369594695956959669597695986959969600696016960269603696046960569606696076960869609696106961169612696136961469615696166961769618696196962069621696226962369624696256962669627696286962969630696316963269633696346963569636696376963869639696406964169642696436964469645696466964769648696496965069651696526965369654696556965669657696586965969660696616966269663696646966569666696676966869669696706967169672696736967469675696766967769678696796968069681696826968369684696856968669687696886968969690696916969269693696946969569696696976969869699697006970169702697036970469705697066970769708697096971069711697126971369714697156971669717697186971969720697216972269723697246972569726697276972869729697306973169732697336973469735697366973769738697396974069741697426974369744697456974669747697486974969750697516975269753697546975569756697576975869759697606976169762697636976469765697666
  1. /*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
  2. *
  3. * Permission is hereby granted, free of charge, to any person obtaining a copy
  4. * of this software and associated documentation files (the "Software"), to deal
  5. * in the Software without restriction, including without limitation the rights
  6. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  7. * copies of the Software, and to permit persons to whom the Software is
  8. * furnished to do so, subject to the following conditions:
  9. *
  10. * The above copyright notice and this permission notice shall be included in
  11. * all copies or substantial portions of the Software.
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  16. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  17. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  18. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  19. * THE SOFTWARE.
  20. *
  21. *===-----------------------------------------------------------------------===
  22. */
  23. #ifndef __ARM_NEON_H
  24. #define __ARM_NEON_H
  25. #if !defined(__ARM_NEON)
  26. #error "NEON support not enabled"
  27. #endif
  28. #include <stdint.h>
  29. typedef float float32_t;
  30. typedef __fp16 float16_t;
  31. #ifdef __aarch64__
  32. typedef double float64_t;
  33. #endif
  34. #ifdef __aarch64__
  35. typedef uint8_t poly8_t;
  36. typedef uint16_t poly16_t;
  37. typedef uint64_t poly64_t;
  38. typedef __uint128_t poly128_t;
  39. #else
  40. typedef int8_t poly8_t;
  41. typedef int16_t poly16_t;
  42. #endif
  43. typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
  44. typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
  45. typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
  46. typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
  47. typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
  48. typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
  49. typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
  50. typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
  51. typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
  52. typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
  53. typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
  54. typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
  55. typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
  56. typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
  57. typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
  58. typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
  59. typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
  60. typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
  61. typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
  62. typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
  63. #ifdef __aarch64__
  64. typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
  65. typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
  66. #endif
  67. typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
  68. typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
  69. typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
  70. typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
  71. #ifdef __aarch64__
  72. typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
  73. typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
  74. #endif
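/* [Editorial note, not part of the original header] The lane-count suffix tells
 * you the register width: 64-bit "D-register" vectors such as int8x8_t hold
 * 8 bytes, while 128-bit "Q-register" vectors such as int8x16_t hold 16 bytes.
 * A quick way to confirm this in user code:
 *
 *   _Static_assert(sizeof(int8x8_t)  == 8,  "D register = 64 bits");
 *   _Static_assert(sizeof(int8x16_t) == 16, "Q register = 128 bits");
 */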
  75. typedef struct int8x8x2_t {
  76. int8x8_t val[2];
  77. } int8x8x2_t;
  78. typedef struct int8x16x2_t {
  79. int8x16_t val[2];
  80. } int8x16x2_t;
  81. typedef struct int16x4x2_t {
  82. int16x4_t val[2];
  83. } int16x4x2_t;
  84. typedef struct int16x8x2_t {
  85. int16x8_t val[2];
  86. } int16x8x2_t;
  87. typedef struct int32x2x2_t {
  88. int32x2_t val[2];
  89. } int32x2x2_t;
  90. typedef struct int32x4x2_t {
  91. int32x4_t val[2];
  92. } int32x4x2_t;
  93. typedef struct int64x1x2_t {
  94. int64x1_t val[2];
  95. } int64x1x2_t;
  96. typedef struct int64x2x2_t {
  97. int64x2_t val[2];
  98. } int64x2x2_t;
  99. typedef struct uint8x8x2_t {
  100. uint8x8_t val[2];
  101. } uint8x8x2_t;
  102. typedef struct uint8x16x2_t {
  103. uint8x16_t val[2];
  104. } uint8x16x2_t;
  105. typedef struct uint16x4x2_t {
  106. uint16x4_t val[2];
  107. } uint16x4x2_t;
  108. typedef struct uint16x8x2_t {
  109. uint16x8_t val[2];
  110. } uint16x8x2_t;
  111. typedef struct uint32x2x2_t {
  112. uint32x2_t val[2];
  113. } uint32x2x2_t;
  114. typedef struct uint32x4x2_t {
  115. uint32x4_t val[2];
  116. } uint32x4x2_t;
  117. typedef struct uint64x1x2_t {
  118. uint64x1_t val[2];
  119. } uint64x1x2_t;
  120. typedef struct uint64x2x2_t {
  121. uint64x2_t val[2];
  122. } uint64x2x2_t;
  123. typedef struct float16x4x2_t {
  124. float16x4_t val[2];
  125. } float16x4x2_t;
  126. typedef struct float16x8x2_t {
  127. float16x8_t val[2];
  128. } float16x8x2_t;
  129. typedef struct float32x2x2_t {
  130. float32x2_t val[2];
  131. } float32x2x2_t;
  132. typedef struct float32x4x2_t {
  133. float32x4_t val[2];
  134. } float32x4x2_t;
  135. #ifdef __aarch64__
  136. typedef struct float64x1x2_t {
  137. float64x1_t val[2];
  138. } float64x1x2_t;
  139. typedef struct float64x2x2_t {
  140. float64x2_t val[2];
  141. } float64x2x2_t;
  142. #endif
  143. typedef struct poly8x8x2_t {
  144. poly8x8_t val[2];
  145. } poly8x8x2_t;
  146. typedef struct poly8x16x2_t {
  147. poly8x16_t val[2];
  148. } poly8x16x2_t;
  149. typedef struct poly16x4x2_t {
  150. poly16x4_t val[2];
  151. } poly16x4x2_t;
  152. typedef struct poly16x8x2_t {
  153. poly16x8_t val[2];
  154. } poly16x8x2_t;
  155. #ifdef __aarch64__
  156. typedef struct poly64x1x2_t {
  157. poly64x1_t val[2];
  158. } poly64x1x2_t;
  159. typedef struct poly64x2x2_t {
  160. poly64x2_t val[2];
  161. } poly64x2x2_t;
  162. #endif
  163. typedef struct int8x8x3_t {
  164. int8x8_t val[3];
  165. } int8x8x3_t;
  166. typedef struct int8x16x3_t {
  167. int8x16_t val[3];
  168. } int8x16x3_t;
  169. typedef struct int16x4x3_t {
  170. int16x4_t val[3];
  171. } int16x4x3_t;
  172. typedef struct int16x8x3_t {
  173. int16x8_t val[3];
  174. } int16x8x3_t;
  175. typedef struct int32x2x3_t {
  176. int32x2_t val[3];
  177. } int32x2x3_t;
  178. typedef struct int32x4x3_t {
  179. int32x4_t val[3];
  180. } int32x4x3_t;
  181. typedef struct int64x1x3_t {
  182. int64x1_t val[3];
  183. } int64x1x3_t;
  184. typedef struct int64x2x3_t {
  185. int64x2_t val[3];
  186. } int64x2x3_t;
  187. typedef struct uint8x8x3_t {
  188. uint8x8_t val[3];
  189. } uint8x8x3_t;
  190. typedef struct uint8x16x3_t {
  191. uint8x16_t val[3];
  192. } uint8x16x3_t;
  193. typedef struct uint16x4x3_t {
  194. uint16x4_t val[3];
  195. } uint16x4x3_t;
  196. typedef struct uint16x8x3_t {
  197. uint16x8_t val[3];
  198. } uint16x8x3_t;
  199. typedef struct uint32x2x3_t {
  200. uint32x2_t val[3];
  201. } uint32x2x3_t;
  202. typedef struct uint32x4x3_t {
  203. uint32x4_t val[3];
  204. } uint32x4x3_t;
  205. typedef struct uint64x1x3_t {
  206. uint64x1_t val[3];
  207. } uint64x1x3_t;
  208. typedef struct uint64x2x3_t {
  209. uint64x2_t val[3];
  210. } uint64x2x3_t;
  211. typedef struct float16x4x3_t {
  212. float16x4_t val[3];
  213. } float16x4x3_t;
  214. typedef struct float16x8x3_t {
  215. float16x8_t val[3];
  216. } float16x8x3_t;
  217. typedef struct float32x2x3_t {
  218. float32x2_t val[3];
  219. } float32x2x3_t;
  220. typedef struct float32x4x3_t {
  221. float32x4_t val[3];
  222. } float32x4x3_t;
  223. #ifdef __aarch64__
  224. typedef struct float64x1x3_t {
  225. float64x1_t val[3];
  226. } float64x1x3_t;
  227. typedef struct float64x2x3_t {
  228. float64x2_t val[3];
  229. } float64x2x3_t;
  230. #endif
  231. typedef struct poly8x8x3_t {
  232. poly8x8_t val[3];
  233. } poly8x8x3_t;
  234. typedef struct poly8x16x3_t {
  235. poly8x16_t val[3];
  236. } poly8x16x3_t;
  237. typedef struct poly16x4x3_t {
  238. poly16x4_t val[3];
  239. } poly16x4x3_t;
  240. typedef struct poly16x8x3_t {
  241. poly16x8_t val[3];
  242. } poly16x8x3_t;
  243. #ifdef __aarch64__
  244. typedef struct poly64x1x3_t {
  245. poly64x1_t val[3];
  246. } poly64x1x3_t;
  247. typedef struct poly64x2x3_t {
  248. poly64x2_t val[3];
  249. } poly64x2x3_t;
  250. #endif
  251. typedef struct int8x8x4_t {
  252. int8x8_t val[4];
  253. } int8x8x4_t;
  254. typedef struct int8x16x4_t {
  255. int8x16_t val[4];
  256. } int8x16x4_t;
  257. typedef struct int16x4x4_t {
  258. int16x4_t val[4];
  259. } int16x4x4_t;
  260. typedef struct int16x8x4_t {
  261. int16x8_t val[4];
  262. } int16x8x4_t;
  263. typedef struct int32x2x4_t {
  264. int32x2_t val[4];
  265. } int32x2x4_t;
  266. typedef struct int32x4x4_t {
  267. int32x4_t val[4];
  268. } int32x4x4_t;
  269. typedef struct int64x1x4_t {
  270. int64x1_t val[4];
  271. } int64x1x4_t;
  272. typedef struct int64x2x4_t {
  273. int64x2_t val[4];
  274. } int64x2x4_t;
  275. typedef struct uint8x8x4_t {
  276. uint8x8_t val[4];
  277. } uint8x8x4_t;
  278. typedef struct uint8x16x4_t {
  279. uint8x16_t val[4];
  280. } uint8x16x4_t;
  281. typedef struct uint16x4x4_t {
  282. uint16x4_t val[4];
  283. } uint16x4x4_t;
  284. typedef struct uint16x8x4_t {
  285. uint16x8_t val[4];
  286. } uint16x8x4_t;
  287. typedef struct uint32x2x4_t {
  288. uint32x2_t val[4];
  289. } uint32x2x4_t;
  290. typedef struct uint32x4x4_t {
  291. uint32x4_t val[4];
  292. } uint32x4x4_t;
  293. typedef struct uint64x1x4_t {
  294. uint64x1_t val[4];
  295. } uint64x1x4_t;
  296. typedef struct uint64x2x4_t {
  297. uint64x2_t val[4];
  298. } uint64x2x4_t;
  299. typedef struct float16x4x4_t {
  300. float16x4_t val[4];
  301. } float16x4x4_t;
  302. typedef struct float16x8x4_t {
  303. float16x8_t val[4];
  304. } float16x8x4_t;
  305. typedef struct float32x2x4_t {
  306. float32x2_t val[4];
  307. } float32x2x4_t;
  308. typedef struct float32x4x4_t {
  309. float32x4_t val[4];
  310. } float32x4x4_t;
  311. #ifdef __aarch64__
  312. typedef struct float64x1x4_t {
  313. float64x1_t val[4];
  314. } float64x1x4_t;
  315. typedef struct float64x2x4_t {
  316. float64x2_t val[4];
  317. } float64x2x4_t;
  318. #endif
  319. typedef struct poly8x8x4_t {
  320. poly8x8_t val[4];
  321. } poly8x8x4_t;
  322. typedef struct poly8x16x4_t {
  323. poly8x16_t val[4];
  324. } poly8x16x4_t;
  325. typedef struct poly16x4x4_t {
  326. poly16x4_t val[4];
  327. } poly16x4x4_t;
  328. typedef struct poly16x8x4_t {
  329. poly16x8_t val[4];
  330. } poly16x8x4_t;
  331. #ifdef __aarch64__
  332. typedef struct poly64x1x4_t {
  333. poly64x1_t val[4];
  334. } poly64x1x4_t;
  335. typedef struct poly64x2x4_t {
  336. poly64x2_t val[4];
  337. } poly64x2x4_t;
  338. #endif
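/* [Editorial note, not part of the original header] The <type>xNxK_t structs
 * above are the argument/return types of the de-interleaving load/store
 * intrinsics (vld2/vld3/vld4, vst2/vst3/vst4, declared further down in this
 * header); the K component vectors are exposed through the .val[] array.
 * A minimal usage sketch, assuming vld2_f32 and vget_lane_f32 with their
 * usual signatures:
 *
 *   #include <arm_neon.h>
 *   float32_t buf[4] = {1.0f, 2.0f, 3.0f, 4.0f};
 *   float32x2x2_t de = vld2_f32(buf);              // de-interleave pairs
 *   float32_t even0 = vget_lane_f32(de.val[0], 0); // 1.0f (elements 0, 2)
 *   float32_t odd0  = vget_lane_f32(de.val[1], 0); // 2.0f (elements 1, 3)
 */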
  339. #define __ai static inline __attribute__((__always_inline__, __nodebug__))
  340. #ifdef __LITTLE_ENDIAN__
  341. __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  342. uint8x16_t __ret;
  343. __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  344. return __ret;
  345. }
  346. #else
  347. __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  348. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  349. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  350. uint8x16_t __ret;
  351. __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  352. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  353. return __ret;
  354. }
  355. __ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  356. uint8x16_t __ret;
  357. __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  358. return __ret;
  359. }
  360. #endif
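/* [Editorial note, not part of the original header] Pattern note for the
 * definitions that follow: the big-endian (#else) variants reverse the lane
 * order of each operand with __builtin_shufflevector, call the same
 * lane-order-agnostic builtin, and reverse the result back, so user code sees
 * identical lane numbering on either endianness. The __noswap_* helpers skip
 * that double reversal; they appear to exist for use by other big-endian
 * wrappers in this header whose operands are already in reversed order.
 */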
  361. #ifdef __LITTLE_ENDIAN__
  362. __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  363. uint32x4_t __ret;
  364. __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  365. return __ret;
  366. }
  367. #else
  368. __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  369. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  370. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  371. uint32x4_t __ret;
  372. __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  373. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  374. return __ret;
  375. }
  376. __ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  377. uint32x4_t __ret;
  378. __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  379. return __ret;
  380. }
  381. #endif
  382. #ifdef __LITTLE_ENDIAN__
  383. __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  384. uint16x8_t __ret;
  385. __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  386. return __ret;
  387. }
  388. #else
  389. __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  390. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  391. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  392. uint16x8_t __ret;
  393. __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  394. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  395. return __ret;
  396. }
  397. __ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  398. uint16x8_t __ret;
  399. __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  400. return __ret;
  401. }
  402. #endif
  403. #ifdef __LITTLE_ENDIAN__
  404. __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
  405. int8x16_t __ret;
  406. __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  407. return __ret;
  408. }
  409. #else
  410. __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
  411. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  412. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  413. int8x16_t __ret;
  414. __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  415. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  416. return __ret;
  417. }
  418. __ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
  419. int8x16_t __ret;
  420. __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  421. return __ret;
  422. }
  423. #endif
  424. #ifdef __LITTLE_ENDIAN__
  425. __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
  426. float32x4_t __ret;
  427. __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  428. return __ret;
  429. }
  430. #else
  431. __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
  432. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  433. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  434. float32x4_t __ret;
  435. __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  436. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  437. return __ret;
  438. }
  439. #endif
  440. #ifdef __LITTLE_ENDIAN__
  441. __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
  442. int32x4_t __ret;
  443. __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  444. return __ret;
  445. }
  446. #else
  447. __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
  448. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  449. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  450. int32x4_t __ret;
  451. __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  452. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  453. return __ret;
  454. }
  455. __ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
  456. int32x4_t __ret;
  457. __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  458. return __ret;
  459. }
  460. #endif
  461. #ifdef __LITTLE_ENDIAN__
  462. __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
  463. int16x8_t __ret;
  464. __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  465. return __ret;
  466. }
  467. #else
  468. __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
  469. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  470. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  471. int16x8_t __ret;
  472. __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  473. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  474. return __ret;
  475. }
  476. __ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
  477. int16x8_t __ret;
  478. __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  479. return __ret;
  480. }
  481. #endif
  482. #ifdef __LITTLE_ENDIAN__
  483. __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  484. uint8x8_t __ret;
  485. __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  486. return __ret;
  487. }
  488. #else
  489. __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  490. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  491. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  492. uint8x8_t __ret;
  493. __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  494. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  495. return __ret;
  496. }
  497. __ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  498. uint8x8_t __ret;
  499. __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  500. return __ret;
  501. }
  502. #endif
  503. #ifdef __LITTLE_ENDIAN__
  504. __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  505. uint32x2_t __ret;
  506. __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  507. return __ret;
  508. }
  509. #else
  510. __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  511. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  512. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  513. uint32x2_t __ret;
  514. __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  515. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  516. return __ret;
  517. }
  518. __ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  519. uint32x2_t __ret;
  520. __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  521. return __ret;
  522. }
  523. #endif
  524. #ifdef __LITTLE_ENDIAN__
  525. __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  526. uint16x4_t __ret;
  527. __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  528. return __ret;
  529. }
  530. #else
  531. __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  532. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  533. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  534. uint16x4_t __ret;
  535. __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  536. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  537. return __ret;
  538. }
  539. __ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  540. uint16x4_t __ret;
  541. __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  542. return __ret;
  543. }
  544. #endif
  545. #ifdef __LITTLE_ENDIAN__
  546. __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
  547. int8x8_t __ret;
  548. __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  549. return __ret;
  550. }
  551. #else
  552. __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
  553. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  554. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  555. int8x8_t __ret;
  556. __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  557. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  558. return __ret;
  559. }
  560. __ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
  561. int8x8_t __ret;
  562. __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  563. return __ret;
  564. }
  565. #endif
  566. #ifdef __LITTLE_ENDIAN__
  567. __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
  568. float32x2_t __ret;
  569. __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  570. return __ret;
  571. }
  572. #else
  573. __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
  574. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  575. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  576. float32x2_t __ret;
  577. __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  578. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  579. return __ret;
  580. }
  581. #endif
  582. #ifdef __LITTLE_ENDIAN__
  583. __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
  584. int32x2_t __ret;
  585. __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  586. return __ret;
  587. }
  588. #else
  589. __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
  590. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  591. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  592. int32x2_t __ret;
  593. __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  594. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  595. return __ret;
  596. }
  597. __ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
  598. int32x2_t __ret;
  599. __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  600. return __ret;
  601. }
  602. #endif
  603. #ifdef __LITTLE_ENDIAN__
  604. __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
  605. int16x4_t __ret;
  606. __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  607. return __ret;
  608. }
  609. #else
  610. __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
  611. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  612. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  613. int16x4_t __ret;
  614. __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  615. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  616. return __ret;
  617. }
  618. __ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
  619. int16x4_t __ret;
  620. __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  621. return __ret;
  622. }
  623. #endif
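/* [Editorial note, not part of the original header] vabd(q)_* computes the
 * element-wise absolute difference |a - b|. A minimal usage sketch, assuming
 * vdup_n_u8 (declared elsewhere in this header) broadcasts a scalar:
 *
 *   #include <arm_neon.h>
 *   uint8x8_t a = vdup_n_u8(10);
 *   uint8x8_t b = vdup_n_u8(250);
 *   uint8x8_t d = vabd_u8(a, b);   // every lane holds |10 - 250| = 240
 */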
  624. #ifdef __LITTLE_ENDIAN__
  625. __ai int8x16_t vabsq_s8(int8x16_t __p0) {
  626. int8x16_t __ret;
  627. __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
  628. return __ret;
  629. }
  630. #else
  631. __ai int8x16_t vabsq_s8(int8x16_t __p0) {
  632. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  633. int8x16_t __ret;
  634. __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
  635. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  636. return __ret;
  637. }
  638. #endif
  639. #ifdef __LITTLE_ENDIAN__
  640. __ai float32x4_t vabsq_f32(float32x4_t __p0) {
  641. float32x4_t __ret;
  642. __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
  643. return __ret;
  644. }
  645. #else
  646. __ai float32x4_t vabsq_f32(float32x4_t __p0) {
  647. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  648. float32x4_t __ret;
  649. __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
  650. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  651. return __ret;
  652. }
  653. #endif
  654. #ifdef __LITTLE_ENDIAN__
  655. __ai int32x4_t vabsq_s32(int32x4_t __p0) {
  656. int32x4_t __ret;
  657. __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
  658. return __ret;
  659. }
  660. #else
  661. __ai int32x4_t vabsq_s32(int32x4_t __p0) {
  662. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  663. int32x4_t __ret;
  664. __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
  665. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  666. return __ret;
  667. }
  668. #endif
  669. #ifdef __LITTLE_ENDIAN__
  670. __ai int16x8_t vabsq_s16(int16x8_t __p0) {
  671. int16x8_t __ret;
  672. __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
  673. return __ret;
  674. }
  675. #else
  676. __ai int16x8_t vabsq_s16(int16x8_t __p0) {
  677. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  678. int16x8_t __ret;
  679. __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
  680. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  681. return __ret;
  682. }
  683. #endif
  684. #ifdef __LITTLE_ENDIAN__
  685. __ai int8x8_t vabs_s8(int8x8_t __p0) {
  686. int8x8_t __ret;
  687. __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
  688. return __ret;
  689. }
  690. #else
  691. __ai int8x8_t vabs_s8(int8x8_t __p0) {
  692. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  693. int8x8_t __ret;
  694. __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
  695. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  696. return __ret;
  697. }
  698. #endif
  699. #ifdef __LITTLE_ENDIAN__
  700. __ai float32x2_t vabs_f32(float32x2_t __p0) {
  701. float32x2_t __ret;
  702. __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
  703. return __ret;
  704. }
  705. #else
  706. __ai float32x2_t vabs_f32(float32x2_t __p0) {
  707. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  708. float32x2_t __ret;
  709. __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
  710. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  711. return __ret;
  712. }
  713. #endif
  714. #ifdef __LITTLE_ENDIAN__
  715. __ai int32x2_t vabs_s32(int32x2_t __p0) {
  716. int32x2_t __ret;
  717. __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
  718. return __ret;
  719. }
  720. #else
  721. __ai int32x2_t vabs_s32(int32x2_t __p0) {
  722. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  723. int32x2_t __ret;
  724. __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
  725. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  726. return __ret;
  727. }
  728. #endif
  729. #ifdef __LITTLE_ENDIAN__
  730. __ai int16x4_t vabs_s16(int16x4_t __p0) {
  731. int16x4_t __ret;
  732. __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
  733. return __ret;
  734. }
  735. #else
  736. __ai int16x4_t vabs_s16(int16x4_t __p0) {
  737. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  738. int16x4_t __ret;
  739. __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
  740. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  741. return __ret;
  742. }
  743. #endif
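/* [Editorial note, not part of the original header] vabs(q)_* is an
 * element-wise absolute value. The integer forms do not saturate: the most
 * negative value (e.g. INT32_MIN) maps to itself; vqabs (defined elsewhere in
 * this header) is the saturating variant. A minimal sketch, assuming the
 * vdup_n_* broadcast intrinsics:
 *
 *   #include <arm_neon.h>
 *   int32x2_t   v  = vdup_n_s32(-7);
 *   int32x2_t   av = vabs_s32(v);        // both lanes become 7
 *   float32x4_t f  = vdupq_n_f32(-1.5f);
 *   float32x4_t af = vabsq_f32(f);       // all four lanes become 1.5f
 */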
  744. #ifdef __LITTLE_ENDIAN__
  745. __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  746. uint8x16_t __ret;
  747. __ret = __p0 + __p1;
  748. return __ret;
  749. }
  750. #else
  751. __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  752. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  753. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  754. uint8x16_t __ret;
  755. __ret = __rev0 + __rev1;
  756. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  757. return __ret;
  758. }
  759. #endif
  760. #ifdef __LITTLE_ENDIAN__
  761. __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  762. uint32x4_t __ret;
  763. __ret = __p0 + __p1;
  764. return __ret;
  765. }
  766. #else
  767. __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  768. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  769. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  770. uint32x4_t __ret;
  771. __ret = __rev0 + __rev1;
  772. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  773. return __ret;
  774. }
  775. #endif
  776. #ifdef __LITTLE_ENDIAN__
  777. __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  778. uint64x2_t __ret;
  779. __ret = __p0 + __p1;
  780. return __ret;
  781. }
  782. #else
  783. __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  784. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  785. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  786. uint64x2_t __ret;
  787. __ret = __rev0 + __rev1;
  788. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  789. return __ret;
  790. }
  791. #endif
  792. #ifdef __LITTLE_ENDIAN__
  793. __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  794. uint16x8_t __ret;
  795. __ret = __p0 + __p1;
  796. return __ret;
  797. }
  798. #else
  799. __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  800. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  801. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  802. uint16x8_t __ret;
  803. __ret = __rev0 + __rev1;
  804. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  805. return __ret;
  806. }
  807. #endif
  808. #ifdef __LITTLE_ENDIAN__
  809. __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  810. int8x16_t __ret;
  811. __ret = __p0 + __p1;
  812. return __ret;
  813. }
  814. #else
  815. __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  816. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  817. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  818. int8x16_t __ret;
  819. __ret = __rev0 + __rev1;
  820. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  821. return __ret;
  822. }
  823. #endif
  824. #ifdef __LITTLE_ENDIAN__
  825. __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
  826. float32x4_t __ret;
  827. __ret = __p0 + __p1;
  828. return __ret;
  829. }
  830. #else
  831. __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
  832. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  833. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  834. float32x4_t __ret;
  835. __ret = __rev0 + __rev1;
  836. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  837. return __ret;
  838. }
  839. #endif
  840. #ifdef __LITTLE_ENDIAN__
  841. __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  842. int32x4_t __ret;
  843. __ret = __p0 + __p1;
  844. return __ret;
  845. }
  846. #else
  847. __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  848. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  849. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  850. int32x4_t __ret;
  851. __ret = __rev0 + __rev1;
  852. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  853. return __ret;
  854. }
  855. #endif
  856. #ifdef __LITTLE_ENDIAN__
  857. __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
  858. int64x2_t __ret;
  859. __ret = __p0 + __p1;
  860. return __ret;
  861. }
  862. #else
  863. __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
  864. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  865. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  866. int64x2_t __ret;
  867. __ret = __rev0 + __rev1;
  868. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  869. return __ret;
  870. }
  871. #endif
  872. #ifdef __LITTLE_ENDIAN__
  873. __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  874. int16x8_t __ret;
  875. __ret = __p0 + __p1;
  876. return __ret;
  877. }
  878. #else
  879. __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  880. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  881. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  882. int16x8_t __ret;
  883. __ret = __rev0 + __rev1;
  884. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  885. return __ret;
  886. }
  887. #endif
  888. #ifdef __LITTLE_ENDIAN__
  889. __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  890. uint8x8_t __ret;
  891. __ret = __p0 + __p1;
  892. return __ret;
  893. }
  894. #else
  895. __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  896. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  897. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  898. uint8x8_t __ret;
  899. __ret = __rev0 + __rev1;
  900. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  901. return __ret;
  902. }
  903. #endif
  904. #ifdef __LITTLE_ENDIAN__
  905. __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  906. uint32x2_t __ret;
  907. __ret = __p0 + __p1;
  908. return __ret;
  909. }
  910. #else
  911. __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  912. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  913. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  914. uint32x2_t __ret;
  915. __ret = __rev0 + __rev1;
  916. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  917. return __ret;
  918. }
  919. #endif
  920. #ifdef __LITTLE_ENDIAN__
  921. __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
  922. uint64x1_t __ret;
  923. __ret = __p0 + __p1;
  924. return __ret;
  925. }
  926. #else
  927. __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
  928. uint64x1_t __ret;
  929. __ret = __p0 + __p1;
  930. return __ret;
  931. }
  932. #endif
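/* [Editorial note, not part of the original header] For single-lane types such
 * as uint64x1_t there is no lane order to reverse, which is why the
 * little-endian and big-endian branches above are identical.
 */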
  933. #ifdef __LITTLE_ENDIAN__
  934. __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  935. uint16x4_t __ret;
  936. __ret = __p0 + __p1;
  937. return __ret;
  938. }
  939. #else
  940. __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  941. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  942. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  943. uint16x4_t __ret;
  944. __ret = __rev0 + __rev1;
  945. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  946. return __ret;
  947. }
  948. #endif
  949. #ifdef __LITTLE_ENDIAN__
  950. __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
  951. int8x8_t __ret;
  952. __ret = __p0 + __p1;
  953. return __ret;
  954. }
  955. #else
  956. __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
  957. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  958. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  959. int8x8_t __ret;
  960. __ret = __rev0 + __rev1;
  961. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  962. return __ret;
  963. }
  964. #endif
  965. #ifdef __LITTLE_ENDIAN__
  966. __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
  967. float32x2_t __ret;
  968. __ret = __p0 + __p1;
  969. return __ret;
  970. }
  971. #else
  972. __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
  973. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  974. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  975. float32x2_t __ret;
  976. __ret = __rev0 + __rev1;
  977. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  978. return __ret;
  979. }
  980. #endif
  981. #ifdef __LITTLE_ENDIAN__
  982. __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
  983. int32x2_t __ret;
  984. __ret = __p0 + __p1;
  985. return __ret;
  986. }
  987. #else
  988. __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
  989. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  990. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  991. int32x2_t __ret;
  992. __ret = __rev0 + __rev1;
  993. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  994. return __ret;
  995. }
  996. #endif
  997. #ifdef __LITTLE_ENDIAN__
  998. __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
  999. int64x1_t __ret;
  1000. __ret = __p0 + __p1;
  1001. return __ret;
  1002. }
  1003. #else
  1004. __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
  1005. int64x1_t __ret;
  1006. __ret = __p0 + __p1;
  1007. return __ret;
  1008. }
  1009. #endif
  1010. #ifdef __LITTLE_ENDIAN__
  1011. __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
  1012. int16x4_t __ret;
  1013. __ret = __p0 + __p1;
  1014. return __ret;
  1015. }
  1016. #else
  1017. __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
  1018. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1019. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1020. int16x4_t __ret;
  1021. __ret = __rev0 + __rev1;
  1022. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1023. return __ret;
  1024. }
  1025. #endif
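/* [Editorial note, not part of the original header] vadd(q)_* is plain
 * modular (wrapping) addition; vqadd (defined elsewhere in this header) is
 * the saturating form. A minimal sketch, assuming vdup_n_u16:
 *
 *   #include <arm_neon.h>
 *   uint16x4_t a = vdup_n_u16(65535);
 *   uint16x4_t b = vdup_n_u16(1);
 *   uint16x4_t s = vadd_u16(a, b);   // wraps modulo 2^16: every lane is 0
 */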
  1026. #ifdef __LITTLE_ENDIAN__
  1027. __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  1028. uint16x4_t __ret;
  1029. __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
  1030. return __ret;
  1031. }
  1032. #else
  1033. __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  1034. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1035. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1036. uint16x4_t __ret;
  1037. __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
  1038. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1039. return __ret;
  1040. }
  1041. __ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  1042. uint16x4_t __ret;
  1043. __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
  1044. return __ret;
  1045. }
  1046. #endif
  1047. #ifdef __LITTLE_ENDIAN__
  1048. __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  1049. uint32x2_t __ret;
  1050. __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
  1051. return __ret;
  1052. }
  1053. #else
  1054. __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  1055. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1056. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1057. uint32x2_t __ret;
  1058. __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
  1059. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1060. return __ret;
  1061. }
  1062. __ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  1063. uint32x2_t __ret;
  1064. __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
  1065. return __ret;
  1066. }
  1067. #endif
  1068. #ifdef __LITTLE_ENDIAN__
  1069. __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  1070. uint8x8_t __ret;
  1071. __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
  1072. return __ret;
  1073. }
  1074. #else
  1075. __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  1076. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1077. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1078. uint8x8_t __ret;
  1079. __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
  1080. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1081. return __ret;
  1082. }
  1083. __ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  1084. uint8x8_t __ret;
  1085. __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
  1086. return __ret;
  1087. }
  1088. #endif
  1089. #ifdef __LITTLE_ENDIAN__
  1090. __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
  1091. int16x4_t __ret;
  1092. __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
  1093. return __ret;
  1094. }
  1095. #else
  1096. __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
  1097. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1098. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1099. int16x4_t __ret;
  1100. __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
  1101. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1102. return __ret;
  1103. }
  1104. __ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
  1105. int16x4_t __ret;
  1106. __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
  1107. return __ret;
  1108. }
  1109. #endif
  1110. #ifdef __LITTLE_ENDIAN__
  1111. __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
  1112. int32x2_t __ret;
  1113. __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
  1114. return __ret;
  1115. }
  1116. #else
  1117. __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
  1118. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1119. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1120. int32x2_t __ret;
  1121. __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
  1122. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1123. return __ret;
  1124. }
  1125. __ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
  1126. int32x2_t __ret;
  1127. __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
  1128. return __ret;
  1129. }
  1130. #endif
  1131. #ifdef __LITTLE_ENDIAN__
  1132. __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
  1133. int8x8_t __ret;
  1134. __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
  1135. return __ret;
  1136. }
  1137. #else
  1138. __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
  1139. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1140. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1141. int8x8_t __ret;
  1142. __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
  1143. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1144. return __ret;
  1145. }
  1146. __ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
  1147. int8x8_t __ret;
  1148. __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
  1149. return __ret;
  1150. }
  1151. #endif
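/* [Editorial note, not part of the original header] vaddhn_* ("add returning
 * high narrow") adds two wide vectors and keeps only the most significant
 * half of each sum, yielding elements of half the width. Sketch, assuming
 * vdupq_n_u16:
 *
 *   #include <arm_neon.h>
 *   uint16x8_t a  = vdupq_n_u16(0x1234);
 *   uint16x8_t b  = vdupq_n_u16(0x0101);
 *   uint8x8_t  hi = vaddhn_u16(a, b);  // 0x1234 + 0x0101 = 0x1335 -> 0x13
 */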
  1152. #ifdef __LITTLE_ENDIAN__
  1153. __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  1154. uint8x16_t __ret;
  1155. __ret = __p0 & __p1;
  1156. return __ret;
  1157. }
  1158. #else
  1159. __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  1160. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1161. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1162. uint8x16_t __ret;
  1163. __ret = __rev0 & __rev1;
  1164. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1165. return __ret;
  1166. }
  1167. #endif
  1168. #ifdef __LITTLE_ENDIAN__
  1169. __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  1170. uint32x4_t __ret;
  1171. __ret = __p0 & __p1;
  1172. return __ret;
  1173. }
  1174. #else
  1175. __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  1176. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1177. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1178. uint32x4_t __ret;
  1179. __ret = __rev0 & __rev1;
  1180. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1181. return __ret;
  1182. }
  1183. #endif
  1184. #ifdef __LITTLE_ENDIAN__
  1185. __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  1186. uint64x2_t __ret;
  1187. __ret = __p0 & __p1;
  1188. return __ret;
  1189. }
  1190. #else
  1191. __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  1192. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1193. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1194. uint64x2_t __ret;
  1195. __ret = __rev0 & __rev1;
  1196. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1197. return __ret;
  1198. }
  1199. #endif
  1200. #ifdef __LITTLE_ENDIAN__
  1201. __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  1202. uint16x8_t __ret;
  1203. __ret = __p0 & __p1;
  1204. return __ret;
  1205. }
  1206. #else
  1207. __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  1208. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1209. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1210. uint16x8_t __ret;
  1211. __ret = __rev0 & __rev1;
  1212. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1213. return __ret;
  1214. }
  1215. #endif
  1216. #ifdef __LITTLE_ENDIAN__
  1217. __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
  1218. int8x16_t __ret;
  1219. __ret = __p0 & __p1;
  1220. return __ret;
  1221. }
  1222. #else
  1223. __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
  1224. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1225. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1226. int8x16_t __ret;
  1227. __ret = __rev0 & __rev1;
  1228. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1229. return __ret;
  1230. }
  1231. #endif
  1232. #ifdef __LITTLE_ENDIAN__
  1233. __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
  1234. int32x4_t __ret;
  1235. __ret = __p0 & __p1;
  1236. return __ret;
  1237. }
  1238. #else
  1239. __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
  1240. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1241. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1242. int32x4_t __ret;
  1243. __ret = __rev0 & __rev1;
  1244. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1245. return __ret;
  1246. }
  1247. #endif
  1248. #ifdef __LITTLE_ENDIAN__
  1249. __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
  1250. int64x2_t __ret;
  1251. __ret = __p0 & __p1;
  1252. return __ret;
  1253. }
  1254. #else
  1255. __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
  1256. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1257. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1258. int64x2_t __ret;
  1259. __ret = __rev0 & __rev1;
  1260. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1261. return __ret;
  1262. }
  1263. #endif
  1264. #ifdef __LITTLE_ENDIAN__
  1265. __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
  1266. int16x8_t __ret;
  1267. __ret = __p0 & __p1;
  1268. return __ret;
  1269. }
  1270. #else
  1271. __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
  1272. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1273. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1274. int16x8_t __ret;
  1275. __ret = __rev0 & __rev1;
  1276. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1277. return __ret;
  1278. }
  1279. #endif
  1280. #ifdef __LITTLE_ENDIAN__
  1281. __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
  1282. uint8x8_t __ret;
  1283. __ret = __p0 & __p1;
  1284. return __ret;
  1285. }
  1286. #else
  1287. __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
  1288. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1289. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1290. uint8x8_t __ret;
  1291. __ret = __rev0 & __rev1;
  1292. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1293. return __ret;
  1294. }
  1295. #endif
  1296. #ifdef __LITTLE_ENDIAN__
  1297. __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
  1298. uint32x2_t __ret;
  1299. __ret = __p0 & __p1;
  1300. return __ret;
  1301. }
  1302. #else
  1303. __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
  1304. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1305. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1306. uint32x2_t __ret;
  1307. __ret = __rev0 & __rev1;
  1308. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1309. return __ret;
  1310. }
  1311. #endif
  1312. #ifdef __LITTLE_ENDIAN__
  1313. __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
  1314. uint64x1_t __ret;
  1315. __ret = __p0 & __p1;
  1316. return __ret;
  1317. }
  1318. #else
  1319. __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
  1320. uint64x1_t __ret;
  1321. __ret = __p0 & __p1;
  1322. return __ret;
  1323. }
  1324. #endif
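/*
 * Note (added commentary): uint64x1_t and int64x1_t hold a single lane, so the
 * big-endian branch above needs no __builtin_shufflevector reversal and is
 * identical to the little-endian one. The same pattern applies to the other
 * single-lane 64-bit d-register intrinsics below.
 */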
  1325. #ifdef __LITTLE_ENDIAN__
  1326. __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
  1327. uint16x4_t __ret;
  1328. __ret = __p0 & __p1;
  1329. return __ret;
  1330. }
  1331. #else
  1332. __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
  1333. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1334. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1335. uint16x4_t __ret;
  1336. __ret = __rev0 & __rev1;
  1337. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1338. return __ret;
  1339. }
  1340. #endif
  1341. #ifdef __LITTLE_ENDIAN__
  1342. __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
  1343. int8x8_t __ret;
  1344. __ret = __p0 & __p1;
  1345. return __ret;
  1346. }
  1347. #else
  1348. __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
  1349. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1350. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1351. int8x8_t __ret;
  1352. __ret = __rev0 & __rev1;
  1353. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1354. return __ret;
  1355. }
  1356. #endif
  1357. #ifdef __LITTLE_ENDIAN__
  1358. __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
  1359. int32x2_t __ret;
  1360. __ret = __p0 & __p1;
  1361. return __ret;
  1362. }
  1363. #else
  1364. __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
  1365. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1366. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1367. int32x2_t __ret;
  1368. __ret = __rev0 & __rev1;
  1369. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1370. return __ret;
  1371. }
  1372. #endif
  1373. #ifdef __LITTLE_ENDIAN__
  1374. __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
  1375. int64x1_t __ret;
  1376. __ret = __p0 & __p1;
  1377. return __ret;
  1378. }
  1379. #else
  1380. __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
  1381. int64x1_t __ret;
  1382. __ret = __p0 & __p1;
  1383. return __ret;
  1384. }
  1385. #endif
  1386. #ifdef __LITTLE_ENDIAN__
  1387. __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
  1388. int16x4_t __ret;
  1389. __ret = __p0 & __p1;
  1390. return __ret;
  1391. }
  1392. #else
  1393. __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
  1394. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1395. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1396. int16x4_t __ret;
  1397. __ret = __rev0 & __rev1;
  1398. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1399. return __ret;
  1400. }
  1401. #endif
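/*
 * Illustrative example (added commentary, not part of the generated header):
 * the vand*_* intrinsics are a plain lane-wise bitwise AND, implemented with
 * the vector & operator. A typical use is masking a field out of each lane;
 * the helper below keeps only the low nibble of every byte. It assumes Clang's
 * brace-initializer syntax for NEON vector types.
 */
__ai uint8x8_t __example_low_nibbles_u8(uint8x8_t __bytes) {
  uint8x8_t __mask = {0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F};
  return vand_u8(__bytes, __mask);
}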
  1402. #ifdef __LITTLE_ENDIAN__
  1403. __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  1404. uint8x16_t __ret;
  1405. __ret = __p0 & ~__p1;
  1406. return __ret;
  1407. }
  1408. #else
  1409. __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  1410. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1411. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1412. uint8x16_t __ret;
  1413. __ret = __rev0 & ~__rev1;
  1414. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1415. return __ret;
  1416. }
  1417. #endif
  1418. #ifdef __LITTLE_ENDIAN__
  1419. __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  1420. uint32x4_t __ret;
  1421. __ret = __p0 & ~__p1;
  1422. return __ret;
  1423. }
  1424. #else
  1425. __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  1426. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1427. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1428. uint32x4_t __ret;
  1429. __ret = __rev0 & ~__rev1;
  1430. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1431. return __ret;
  1432. }
  1433. #endif
  1434. #ifdef __LITTLE_ENDIAN__
  1435. __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  1436. uint64x2_t __ret;
  1437. __ret = __p0 & ~__p1;
  1438. return __ret;
  1439. }
  1440. #else
  1441. __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  1442. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1443. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1444. uint64x2_t __ret;
  1445. __ret = __rev0 & ~__rev1;
  1446. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1447. return __ret;
  1448. }
  1449. #endif
  1450. #ifdef __LITTLE_ENDIAN__
  1451. __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  1452. uint16x8_t __ret;
  1453. __ret = __p0 & ~__p1;
  1454. return __ret;
  1455. }
  1456. #else
  1457. __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  1458. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1459. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1460. uint16x8_t __ret;
  1461. __ret = __rev0 & ~__rev1;
  1462. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1463. return __ret;
  1464. }
  1465. #endif
  1466. #ifdef __LITTLE_ENDIAN__
  1467. __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
  1468. int8x16_t __ret;
  1469. __ret = __p0 & ~__p1;
  1470. return __ret;
  1471. }
  1472. #else
  1473. __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
  1474. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1475. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1476. int8x16_t __ret;
  1477. __ret = __rev0 & ~__rev1;
  1478. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1479. return __ret;
  1480. }
  1481. #endif
  1482. #ifdef __LITTLE_ENDIAN__
  1483. __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
  1484. int32x4_t __ret;
  1485. __ret = __p0 & ~__p1;
  1486. return __ret;
  1487. }
  1488. #else
  1489. __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
  1490. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1491. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1492. int32x4_t __ret;
  1493. __ret = __rev0 & ~__rev1;
  1494. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1495. return __ret;
  1496. }
  1497. #endif
  1498. #ifdef __LITTLE_ENDIAN__
  1499. __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
  1500. int64x2_t __ret;
  1501. __ret = __p0 & ~__p1;
  1502. return __ret;
  1503. }
  1504. #else
  1505. __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
  1506. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1507. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1508. int64x2_t __ret;
  1509. __ret = __rev0 & ~__rev1;
  1510. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1511. return __ret;
  1512. }
  1513. #endif
  1514. #ifdef __LITTLE_ENDIAN__
  1515. __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
  1516. int16x8_t __ret;
  1517. __ret = __p0 & ~__p1;
  1518. return __ret;
  1519. }
  1520. #else
  1521. __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
  1522. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1523. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1524. int16x8_t __ret;
  1525. __ret = __rev0 & ~__rev1;
  1526. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1527. return __ret;
  1528. }
  1529. #endif
  1530. #ifdef __LITTLE_ENDIAN__
  1531. __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
  1532. uint8x8_t __ret;
  1533. __ret = __p0 & ~__p1;
  1534. return __ret;
  1535. }
  1536. #else
  1537. __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
  1538. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1539. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1540. uint8x8_t __ret;
  1541. __ret = __rev0 & ~__rev1;
  1542. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1543. return __ret;
  1544. }
  1545. #endif
  1546. #ifdef __LITTLE_ENDIAN__
  1547. __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
  1548. uint32x2_t __ret;
  1549. __ret = __p0 & ~__p1;
  1550. return __ret;
  1551. }
  1552. #else
  1553. __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
  1554. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1555. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1556. uint32x2_t __ret;
  1557. __ret = __rev0 & ~__rev1;
  1558. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1559. return __ret;
  1560. }
  1561. #endif
  1562. #ifdef __LITTLE_ENDIAN__
  1563. __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
  1564. uint64x1_t __ret;
  1565. __ret = __p0 & ~__p1;
  1566. return __ret;
  1567. }
  1568. #else
  1569. __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
  1570. uint64x1_t __ret;
  1571. __ret = __p0 & ~__p1;
  1572. return __ret;
  1573. }
  1574. #endif
  1575. #ifdef __LITTLE_ENDIAN__
  1576. __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
  1577. uint16x4_t __ret;
  1578. __ret = __p0 & ~__p1;
  1579. return __ret;
  1580. }
  1581. #else
  1582. __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
  1583. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1584. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1585. uint16x4_t __ret;
  1586. __ret = __rev0 & ~__rev1;
  1587. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1588. return __ret;
  1589. }
  1590. #endif
  1591. #ifdef __LITTLE_ENDIAN__
  1592. __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
  1593. int8x8_t __ret;
  1594. __ret = __p0 & ~__p1;
  1595. return __ret;
  1596. }
  1597. #else
  1598. __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
  1599. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1600. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1601. int8x8_t __ret;
  1602. __ret = __rev0 & ~__rev1;
  1603. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1604. return __ret;
  1605. }
  1606. #endif
  1607. #ifdef __LITTLE_ENDIAN__
  1608. __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
  1609. int32x2_t __ret;
  1610. __ret = __p0 & ~__p1;
  1611. return __ret;
  1612. }
  1613. #else
  1614. __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
  1615. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1616. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1617. int32x2_t __ret;
  1618. __ret = __rev0 & ~__rev1;
  1619. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1620. return __ret;
  1621. }
  1622. #endif
  1623. #ifdef __LITTLE_ENDIAN__
  1624. __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
  1625. int64x1_t __ret;
  1626. __ret = __p0 & ~__p1;
  1627. return __ret;
  1628. }
  1629. #else
  1630. __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
  1631. int64x1_t __ret;
  1632. __ret = __p0 & ~__p1;
  1633. return __ret;
  1634. }
  1635. #endif
  1636. #ifdef __LITTLE_ENDIAN__
  1637. __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
  1638. int16x4_t __ret;
  1639. __ret = __p0 & ~__p1;
  1640. return __ret;
  1641. }
  1642. #else
  1643. __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
  1644. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1645. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1646. int16x4_t __ret;
  1647. __ret = __rev0 & ~__rev1;
  1648. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1649. return __ret;
  1650. }
  1651. #endif
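/*
 * Illustrative example (added commentary, not part of the generated header):
 * vbic*_* computes __p0 & ~__p1 ("bit clear"): any bit set in the second
 * operand is cleared in the first. The helper below clears the low two bits of
 * every lane, e.g. to round addresses or indices down to a multiple of 4.
 */
__ai uint32x2_t __example_align_down_4_u32(uint32x2_t __addrs) {
  uint32x2_t __low_bits = {0x3, 0x3};
  return vbic_u32(__addrs, __low_bits);  /* __addrs[i] & ~0x3 in each lane */
}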
  1652. #ifdef __LITTLE_ENDIAN__
  1653. __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
  1654. poly8x8_t __ret;
  1655. __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
  1656. return __ret;
  1657. }
  1658. #else
  1659. __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
  1660. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1661. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1662. poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  1663. poly8x8_t __ret;
  1664. __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
  1665. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1666. return __ret;
  1667. }
  1668. #endif
  1669. #ifdef __LITTLE_ENDIAN__
  1670. __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
  1671. poly16x4_t __ret;
  1672. __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
  1673. return __ret;
  1674. }
  1675. #else
  1676. __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
  1677. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1678. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1679. poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  1680. poly16x4_t __ret;
  1681. __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
  1682. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1683. return __ret;
  1684. }
  1685. #endif
  1686. #ifdef __LITTLE_ENDIAN__
  1687. __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
  1688. poly8x16_t __ret;
  1689. __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
  1690. return __ret;
  1691. }
  1692. #else
  1693. __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
  1694. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1695. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1696. poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1697. poly8x16_t __ret;
  1698. __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
  1699. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1700. return __ret;
  1701. }
  1702. #endif
  1703. #ifdef __LITTLE_ENDIAN__
  1704. __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
  1705. poly16x8_t __ret;
  1706. __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
  1707. return __ret;
  1708. }
  1709. #else
  1710. __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
  1711. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1712. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1713. poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  1714. poly16x8_t __ret;
  1715. __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
  1716. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1717. return __ret;
  1718. }
  1719. #endif
  1720. #ifdef __LITTLE_ENDIAN__
  1721. __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  1722. uint8x16_t __ret;
  1723. __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
  1724. return __ret;
  1725. }
  1726. #else
  1727. __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  1728. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1729. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1730. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1731. uint8x16_t __ret;
  1732. __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
  1733. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1734. return __ret;
  1735. }
  1736. #endif
  1737. #ifdef __LITTLE_ENDIAN__
  1738. __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  1739. uint32x4_t __ret;
  1740. __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
  1741. return __ret;
  1742. }
  1743. #else
  1744. __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  1745. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1746. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1747. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  1748. uint32x4_t __ret;
  1749. __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
  1750. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1751. return __ret;
  1752. }
  1753. #endif
  1754. #ifdef __LITTLE_ENDIAN__
  1755. __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  1756. uint64x2_t __ret;
  1757. __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
  1758. return __ret;
  1759. }
  1760. #else
  1761. __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  1762. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1763. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1764. uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  1765. uint64x2_t __ret;
  1766. __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
  1767. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1768. return __ret;
  1769. }
  1770. #endif
  1771. #ifdef __LITTLE_ENDIAN__
  1772. __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  1773. uint16x8_t __ret;
  1774. __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
  1775. return __ret;
  1776. }
  1777. #else
  1778. __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  1779. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1780. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1781. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  1782. uint16x8_t __ret;
  1783. __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
  1784. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1785. return __ret;
  1786. }
  1787. #endif
  1788. #ifdef __LITTLE_ENDIAN__
  1789. __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  1790. int8x16_t __ret;
  1791. __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
  1792. return __ret;
  1793. }
  1794. #else
  1795. __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  1796. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1797. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1798. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1799. int8x16_t __ret;
  1800. __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
  1801. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  1802. return __ret;
  1803. }
  1804. #endif
  1805. #ifdef __LITTLE_ENDIAN__
  1806. __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  1807. float32x4_t __ret;
  1808. __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
  1809. return __ret;
  1810. }
  1811. #else
  1812. __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  1813. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1814. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1815. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  1816. float32x4_t __ret;
  1817. __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
  1818. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1819. return __ret;
  1820. }
  1821. #endif
  1822. #ifdef __LITTLE_ENDIAN__
  1823. __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  1824. int32x4_t __ret;
  1825. __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
  1826. return __ret;
  1827. }
  1828. #else
  1829. __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  1830. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1831. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1832. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  1833. int32x4_t __ret;
  1834. __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
  1835. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1836. return __ret;
  1837. }
  1838. #endif
  1839. #ifdef __LITTLE_ENDIAN__
  1840. __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  1841. int64x2_t __ret;
  1842. __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
  1843. return __ret;
  1844. }
  1845. #else
  1846. __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  1847. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1848. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1849. int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  1850. int64x2_t __ret;
  1851. __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
  1852. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1853. return __ret;
  1854. }
  1855. #endif
  1856. #ifdef __LITTLE_ENDIAN__
  1857. __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  1858. int16x8_t __ret;
  1859. __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
  1860. return __ret;
  1861. }
  1862. #else
  1863. __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  1864. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1865. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1866. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  1867. int16x8_t __ret;
  1868. __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
  1869. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1870. return __ret;
  1871. }
  1872. #endif
  1873. #ifdef __LITTLE_ENDIAN__
  1874. __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  1875. uint8x8_t __ret;
  1876. __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
  1877. return __ret;
  1878. }
  1879. #else
  1880. __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  1881. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1882. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1883. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  1884. uint8x8_t __ret;
  1885. __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
  1886. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1887. return __ret;
  1888. }
  1889. #endif
  1890. #ifdef __LITTLE_ENDIAN__
  1891. __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  1892. uint32x2_t __ret;
  1893. __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
  1894. return __ret;
  1895. }
  1896. #else
  1897. __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  1898. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1899. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1900. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  1901. uint32x2_t __ret;
  1902. __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
  1903. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1904. return __ret;
  1905. }
  1906. #endif
  1907. #ifdef __LITTLE_ENDIAN__
  1908. __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
  1909. uint64x1_t __ret;
  1910. __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
  1911. return __ret;
  1912. }
  1913. #else
  1914. __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
  1915. uint64x1_t __ret;
  1916. __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
  1917. return __ret;
  1918. }
  1919. #endif
  1920. #ifdef __LITTLE_ENDIAN__
  1921. __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  1922. uint16x4_t __ret;
  1923. __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
  1924. return __ret;
  1925. }
  1926. #else
  1927. __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  1928. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  1929. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  1930. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  1931. uint16x4_t __ret;
  1932. __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
  1933. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  1934. return __ret;
  1935. }
  1936. #endif
  1937. #ifdef __LITTLE_ENDIAN__
  1938. __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  1939. int8x8_t __ret;
  1940. __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
  1941. return __ret;
  1942. }
  1943. #else
  1944. __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  1945. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  1946. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  1947. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  1948. int8x8_t __ret;
  1949. __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
  1950. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  1951. return __ret;
  1952. }
  1953. #endif
  1954. #ifdef __LITTLE_ENDIAN__
  1955. __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  1956. float32x2_t __ret;
  1957. __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
  1958. return __ret;
  1959. }
  1960. #else
  1961. __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  1962. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1963. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1964. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  1965. float32x2_t __ret;
  1966. __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
  1967. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1968. return __ret;
  1969. }
  1970. #endif
  1971. #ifdef __LITTLE_ENDIAN__
  1972. __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  1973. int32x2_t __ret;
  1974. __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
  1975. return __ret;
  1976. }
  1977. #else
  1978. __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  1979. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  1980. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  1981. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  1982. int32x2_t __ret;
  1983. __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
  1984. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  1985. return __ret;
  1986. }
  1987. #endif
  1988. #ifdef __LITTLE_ENDIAN__
  1989. __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
  1990. int64x1_t __ret;
  1991. __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
  1992. return __ret;
  1993. }
  1994. #else
  1995. __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
  1996. int64x1_t __ret;
  1997. __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
  1998. return __ret;
  1999. }
  2000. #endif
  2001. #ifdef __LITTLE_ENDIAN__
  2002. __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  2003. int16x4_t __ret;
  2004. __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
  2005. return __ret;
  2006. }
  2007. #else
  2008. __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  2009. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2010. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2011. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  2012. int16x4_t __ret;
  2013. __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
  2014. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2015. return __ret;
  2016. }
  2017. #endif
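/*
 * Illustrative example (added commentary, not part of the generated header):
 * vbsl*_*(__mask, __a, __b) selects bit by bit: result bits come from __a where
 * the corresponding __mask bit is 1 and from __b where it is 0. With a mask
 * whose lanes are all-ones or all-zeros (for example the result of one of the
 * comparison intrinsics such as vceq_s16 further below), this acts as a
 * lane-wise conditional select. The helper fixes the easy-to-confuse operand
 * order.
 */
__ai int16x4_t __example_select_s16(uint16x4_t __mask, int16x4_t __if_set, int16x4_t __if_clear) {
  return vbsl_s16(__mask, __if_set, __if_clear);
}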
  2018. #ifdef __LITTLE_ENDIAN__
  2019. __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
  2020. uint32x4_t __ret;
  2021. __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  2022. return __ret;
  2023. }
  2024. #else
  2025. __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
  2026. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2027. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2028. uint32x4_t __ret;
  2029. __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  2030. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2031. return __ret;
  2032. }
  2033. #endif
  2034. #ifdef __LITTLE_ENDIAN__
  2035. __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
  2036. uint32x2_t __ret;
  2037. __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  2038. return __ret;
  2039. }
  2040. #else
  2041. __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
  2042. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2043. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2044. uint32x2_t __ret;
  2045. __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  2046. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2047. return __ret;
  2048. }
  2049. #endif
  2050. #ifdef __LITTLE_ENDIAN__
  2051. __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
  2052. uint32x4_t __ret;
  2053. __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  2054. return __ret;
  2055. }
  2056. #else
  2057. __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
  2058. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2059. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2060. uint32x4_t __ret;
  2061. __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  2062. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2063. return __ret;
  2064. }
  2065. #endif
  2066. #ifdef __LITTLE_ENDIAN__
  2067. __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
  2068. uint32x2_t __ret;
  2069. __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  2070. return __ret;
  2071. }
  2072. #else
  2073. __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
  2074. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2075. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2076. uint32x2_t __ret;
  2077. __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  2078. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2079. return __ret;
  2080. }
  2081. #endif
  2082. #ifdef __LITTLE_ENDIAN__
  2083. __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
  2084. uint32x4_t __ret;
  2085. __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  2086. return __ret;
  2087. }
  2088. #else
  2089. __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
  2090. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2091. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2092. uint32x4_t __ret;
  2093. __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  2094. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2095. return __ret;
  2096. }
  2097. #endif
  2098. #ifdef __LITTLE_ENDIAN__
  2099. __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
  2100. uint32x2_t __ret;
  2101. __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  2102. return __ret;
  2103. }
  2104. #else
  2105. __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
  2106. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2107. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2108. uint32x2_t __ret;
  2109. __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  2110. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2111. return __ret;
  2112. }
  2113. #endif
  2114. #ifdef __LITTLE_ENDIAN__
  2115. __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
  2116. uint32x4_t __ret;
  2117. __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  2118. return __ret;
  2119. }
  2120. #else
  2121. __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
  2122. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2123. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2124. uint32x4_t __ret;
  2125. __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  2126. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2127. return __ret;
  2128. }
  2129. #endif
  2130. #ifdef __LITTLE_ENDIAN__
  2131. __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
  2132. uint32x2_t __ret;
  2133. __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  2134. return __ret;
  2135. }
  2136. #else
  2137. __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
  2138. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2139. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2140. uint32x2_t __ret;
  2141. __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  2142. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2143. return __ret;
  2144. }
  2145. #endif
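/*
 * Illustrative example (added commentary, not part of the generated header):
 * the vcage/vcagt/vcale/vcalt intrinsics compare absolute values and return an
 * all-ones / all-zeros mask per lane: vcage_f32(a, b) tests |a| >= |b|,
 * vcagt_f32 tests |a| > |b|, and vcale/vcalt are the reversed-operand forms
 * (|a| <= |b| and |a| < |b|). The helper below flags lanes whose magnitude is
 * within a per-lane tolerance.
 */
__ai uint32x2_t __example_within_tolerance_f32(float32x2_t __err, float32x2_t __tol) {
  return vcale_f32(__err, __tol);  /* all-ones where |__err| <= |__tol|, zero otherwise */
}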
  2146. #ifdef __LITTLE_ENDIAN__
  2147. __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
  2148. uint8x8_t __ret;
  2149. __ret = (uint8x8_t)(__p0 == __p1);
  2150. return __ret;
  2151. }
  2152. #else
  2153. __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
  2154. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2155. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2156. uint8x8_t __ret;
  2157. __ret = (uint8x8_t)(__rev0 == __rev1);
  2158. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2159. return __ret;
  2160. }
  2161. #endif
  2162. #ifdef __LITTLE_ENDIAN__
  2163. __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  2164. uint8x16_t __ret;
  2165. __ret = (uint8x16_t)(__p0 == __p1);
  2166. return __ret;
  2167. }
  2168. #else
  2169. __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  2170. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2171. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2172. uint8x16_t __ret;
  2173. __ret = (uint8x16_t)(__rev0 == __rev1);
  2174. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2175. return __ret;
  2176. }
  2177. #endif
  2178. #ifdef __LITTLE_ENDIAN__
  2179. __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  2180. uint8x16_t __ret;
  2181. __ret = (uint8x16_t)(__p0 == __p1);
  2182. return __ret;
  2183. }
  2184. #else
  2185. __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  2186. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2187. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2188. uint8x16_t __ret;
  2189. __ret = (uint8x16_t)(__rev0 == __rev1);
  2190. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2191. return __ret;
  2192. }
  2193. #endif
  2194. #ifdef __LITTLE_ENDIAN__
  2195. __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  2196. uint32x4_t __ret;
  2197. __ret = (uint32x4_t)(__p0 == __p1);
  2198. return __ret;
  2199. }
  2200. #else
  2201. __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  2202. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2203. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2204. uint32x4_t __ret;
  2205. __ret = (uint32x4_t)(__rev0 == __rev1);
  2206. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2207. return __ret;
  2208. }
  2209. #endif
  2210. #ifdef __LITTLE_ENDIAN__
  2211. __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  2212. uint16x8_t __ret;
  2213. __ret = (uint16x8_t)(__p0 == __p1);
  2214. return __ret;
  2215. }
  2216. #else
  2217. __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  2218. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2219. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2220. uint16x8_t __ret;
  2221. __ret = (uint16x8_t)(__rev0 == __rev1);
  2222. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2223. return __ret;
  2224. }
  2225. #endif
  2226. #ifdef __LITTLE_ENDIAN__
  2227. __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
  2228. uint8x16_t __ret;
  2229. __ret = (uint8x16_t)(__p0 == __p1);
  2230. return __ret;
  2231. }
  2232. #else
  2233. __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
  2234. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2235. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2236. uint8x16_t __ret;
  2237. __ret = (uint8x16_t)(__rev0 == __rev1);
  2238. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2239. return __ret;
  2240. }
  2241. #endif
  2242. #ifdef __LITTLE_ENDIAN__
  2243. __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
  2244. uint32x4_t __ret;
  2245. __ret = (uint32x4_t)(__p0 == __p1);
  2246. return __ret;
  2247. }
  2248. #else
  2249. __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
  2250. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2251. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2252. uint32x4_t __ret;
  2253. __ret = (uint32x4_t)(__rev0 == __rev1);
  2254. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2255. return __ret;
  2256. }
  2257. #endif
  2258. #ifdef __LITTLE_ENDIAN__
  2259. __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
  2260. uint32x4_t __ret;
  2261. __ret = (uint32x4_t)(__p0 == __p1);
  2262. return __ret;
  2263. }
  2264. #else
  2265. __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
  2266. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2267. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2268. uint32x4_t __ret;
  2269. __ret = (uint32x4_t)(__rev0 == __rev1);
  2270. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2271. return __ret;
  2272. }
  2273. #endif
  2274. #ifdef __LITTLE_ENDIAN__
  2275. __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
  2276. uint16x8_t __ret;
  2277. __ret = (uint16x8_t)(__p0 == __p1);
  2278. return __ret;
  2279. }
  2280. #else
  2281. __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
  2282. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2283. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2284. uint16x8_t __ret;
  2285. __ret = (uint16x8_t)(__rev0 == __rev1);
  2286. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2287. return __ret;
  2288. }
  2289. #endif
  2290. #ifdef __LITTLE_ENDIAN__
  2291. __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
  2292. uint8x8_t __ret;
  2293. __ret = (uint8x8_t)(__p0 == __p1);
  2294. return __ret;
  2295. }
  2296. #else
  2297. __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
  2298. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2299. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2300. uint8x8_t __ret;
  2301. __ret = (uint8x8_t)(__rev0 == __rev1);
  2302. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2303. return __ret;
  2304. }
  2305. #endif
  2306. #ifdef __LITTLE_ENDIAN__
  2307. __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
  2308. uint32x2_t __ret;
  2309. __ret = (uint32x2_t)(__p0 == __p1);
  2310. return __ret;
  2311. }
  2312. #else
  2313. __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
  2314. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2315. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2316. uint32x2_t __ret;
  2317. __ret = (uint32x2_t)(__rev0 == __rev1);
  2318. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2319. return __ret;
  2320. }
  2321. #endif
  2322. #ifdef __LITTLE_ENDIAN__
  2323. __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
  2324. uint16x4_t __ret;
  2325. __ret = (uint16x4_t)(__p0 == __p1);
  2326. return __ret;
  2327. }
  2328. #else
  2329. __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
  2330. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2331. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2332. uint16x4_t __ret;
  2333. __ret = (uint16x4_t)(__rev0 == __rev1);
  2334. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2335. return __ret;
  2336. }
  2337. #endif
  2338. #ifdef __LITTLE_ENDIAN__
  2339. __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
  2340. uint8x8_t __ret;
  2341. __ret = (uint8x8_t)(__p0 == __p1);
  2342. return __ret;
  2343. }
  2344. #else
  2345. __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
  2346. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2347. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2348. uint8x8_t __ret;
  2349. __ret = (uint8x8_t)(__rev0 == __rev1);
  2350. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2351. return __ret;
  2352. }
  2353. #endif
  2354. #ifdef __LITTLE_ENDIAN__
  2355. __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
  2356. uint32x2_t __ret;
  2357. __ret = (uint32x2_t)(__p0 == __p1);
  2358. return __ret;
  2359. }
  2360. #else
  2361. __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
  2362. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2363. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2364. uint32x2_t __ret;
  2365. __ret = (uint32x2_t)(__rev0 == __rev1);
  2366. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2367. return __ret;
  2368. }
  2369. #endif
  2370. #ifdef __LITTLE_ENDIAN__
  2371. __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
  2372. uint32x2_t __ret;
  2373. __ret = (uint32x2_t)(__p0 == __p1);
  2374. return __ret;
  2375. }
  2376. #else
  2377. __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
  2378. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2379. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2380. uint32x2_t __ret;
  2381. __ret = (uint32x2_t)(__rev0 == __rev1);
  2382. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2383. return __ret;
  2384. }
  2385. #endif
  2386. #ifdef __LITTLE_ENDIAN__
  2387. __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
  2388. uint16x4_t __ret;
  2389. __ret = (uint16x4_t)(__p0 == __p1);
  2390. return __ret;
  2391. }
  2392. #else
  2393. __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
  2394. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2395. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2396. uint16x4_t __ret;
  2397. __ret = (uint16x4_t)(__rev0 == __rev1);
  2398. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2399. return __ret;
  2400. }
  2401. #endif
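/*
 * Illustrative example (added commentary, not part of the generated header):
 * the vceq*_* intrinsics compare lanes for equality and return an unsigned mask
 * vector (all-ones where equal, zero otherwise). Such masks combine naturally
 * with the vbsl intrinsics defined earlier: the helper below replaces lanes of
 * __x that equal __match with __subst and leaves the other lanes unchanged.
 */
__ai float32x2_t __example_replace_equal_f32(float32x2_t __x, float32x2_t __match, float32x2_t __subst) {
  uint32x2_t __eq = vceq_f32(__x, __match);  /* all-ones where __x[i] == __match[i] */
  return vbsl_f32(__eq, __subst, __x);       /* take __subst in matching lanes, __x elsewhere */
}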
  2402. #ifdef __LITTLE_ENDIAN__
  2403. __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  2404. uint8x16_t __ret;
  2405. __ret = (uint8x16_t)(__p0 >= __p1);
  2406. return __ret;
  2407. }
  2408. #else
  2409. __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  2410. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2411. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2412. uint8x16_t __ret;
  2413. __ret = (uint8x16_t)(__rev0 >= __rev1);
  2414. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2415. return __ret;
  2416. }
  2417. #endif
  2418. #ifdef __LITTLE_ENDIAN__
  2419. __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  2420. uint32x4_t __ret;
  2421. __ret = (uint32x4_t)(__p0 >= __p1);
  2422. return __ret;
  2423. }
  2424. #else
  2425. __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  2426. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2427. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2428. uint32x4_t __ret;
  2429. __ret = (uint32x4_t)(__rev0 >= __rev1);
  2430. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2431. return __ret;
  2432. }
  2433. #endif
  2434. #ifdef __LITTLE_ENDIAN__
  2435. __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  2436. uint16x8_t __ret;
  2437. __ret = (uint16x8_t)(__p0 >= __p1);
  2438. return __ret;
  2439. }
  2440. #else
  2441. __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  2442. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2443. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2444. uint16x8_t __ret;
  2445. __ret = (uint16x8_t)(__rev0 >= __rev1);
  2446. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2447. return __ret;
  2448. }
  2449. #endif
  2450. #ifdef __LITTLE_ENDIAN__
  2451. __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
  2452. uint8x16_t __ret;
  2453. __ret = (uint8x16_t)(__p0 >= __p1);
  2454. return __ret;
  2455. }
  2456. #else
  2457. __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
  2458. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2459. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2460. uint8x16_t __ret;
  2461. __ret = (uint8x16_t)(__rev0 >= __rev1);
  2462. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2463. return __ret;
  2464. }
  2465. #endif
  2466. #ifdef __LITTLE_ENDIAN__
  2467. __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
  2468. uint32x4_t __ret;
  2469. __ret = (uint32x4_t)(__p0 >= __p1);
  2470. return __ret;
  2471. }
  2472. #else
  2473. __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
  2474. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2475. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2476. uint32x4_t __ret;
  2477. __ret = (uint32x4_t)(__rev0 >= __rev1);
  2478. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2479. return __ret;
  2480. }
  2481. #endif
  2482. #ifdef __LITTLE_ENDIAN__
  2483. __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
  2484. uint32x4_t __ret;
  2485. __ret = (uint32x4_t)(__p0 >= __p1);
  2486. return __ret;
  2487. }
  2488. #else
  2489. __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
  2490. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2491. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2492. uint32x4_t __ret;
  2493. __ret = (uint32x4_t)(__rev0 >= __rev1);
  2494. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2495. return __ret;
  2496. }
  2497. #endif
  2498. #ifdef __LITTLE_ENDIAN__
  2499. __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
  2500. uint16x8_t __ret;
  2501. __ret = (uint16x8_t)(__p0 >= __p1);
  2502. return __ret;
  2503. }
  2504. #else
  2505. __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
  2506. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2507. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2508. uint16x8_t __ret;
  2509. __ret = (uint16x8_t)(__rev0 >= __rev1);
  2510. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2511. return __ret;
  2512. }
  2513. #endif
  2514. #ifdef __LITTLE_ENDIAN__
  2515. __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
  2516. uint8x8_t __ret;
  2517. __ret = (uint8x8_t)(__p0 >= __p1);
  2518. return __ret;
  2519. }
  2520. #else
  2521. __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
  2522. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2523. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2524. uint8x8_t __ret;
  2525. __ret = (uint8x8_t)(__rev0 >= __rev1);
  2526. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2527. return __ret;
  2528. }
  2529. #endif
  2530. #ifdef __LITTLE_ENDIAN__
  2531. __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
  2532. uint32x2_t __ret;
  2533. __ret = (uint32x2_t)(__p0 >= __p1);
  2534. return __ret;
  2535. }
  2536. #else
  2537. __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
  2538. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2539. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2540. uint32x2_t __ret;
  2541. __ret = (uint32x2_t)(__rev0 >= __rev1);
  2542. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2543. return __ret;
  2544. }
  2545. #endif
  2546. #ifdef __LITTLE_ENDIAN__
  2547. __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
  2548. uint16x4_t __ret;
  2549. __ret = (uint16x4_t)(__p0 >= __p1);
  2550. return __ret;
  2551. }
  2552. #else
  2553. __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
  2554. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2555. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2556. uint16x4_t __ret;
  2557. __ret = (uint16x4_t)(__rev0 >= __rev1);
  2558. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2559. return __ret;
  2560. }
  2561. #endif
  2562. #ifdef __LITTLE_ENDIAN__
  2563. __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
  2564. uint8x8_t __ret;
  2565. __ret = (uint8x8_t)(__p0 >= __p1);
  2566. return __ret;
  2567. }
  2568. #else
  2569. __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
  2570. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2571. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2572. uint8x8_t __ret;
  2573. __ret = (uint8x8_t)(__rev0 >= __rev1);
  2574. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2575. return __ret;
  2576. }
  2577. #endif
  2578. #ifdef __LITTLE_ENDIAN__
  2579. __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
  2580. uint32x2_t __ret;
  2581. __ret = (uint32x2_t)(__p0 >= __p1);
  2582. return __ret;
  2583. }
  2584. #else
  2585. __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
  2586. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2587. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2588. uint32x2_t __ret;
  2589. __ret = (uint32x2_t)(__rev0 >= __rev1);
  2590. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2591. return __ret;
  2592. }
  2593. #endif
  2594. #ifdef __LITTLE_ENDIAN__
  2595. __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
  2596. uint32x2_t __ret;
  2597. __ret = (uint32x2_t)(__p0 >= __p1);
  2598. return __ret;
  2599. }
  2600. #else
  2601. __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
  2602. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2603. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2604. uint32x2_t __ret;
  2605. __ret = (uint32x2_t)(__rev0 >= __rev1);
  2606. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2607. return __ret;
  2608. }
  2609. #endif
  2610. #ifdef __LITTLE_ENDIAN__
  2611. __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
  2612. uint16x4_t __ret;
  2613. __ret = (uint16x4_t)(__p0 >= __p1);
  2614. return __ret;
  2615. }
  2616. #else
  2617. __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
  2618. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2619. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2620. uint16x4_t __ret;
  2621. __ret = (uint16x4_t)(__rev0 >= __rev1);
  2622. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2623. return __ret;
  2624. }
  2625. #endif
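/* vcgtq_* and vcgt_*: lane-wise "greater than" comparisons, with the same
   result convention as vcge_* (all-ones lane when true, all-zeros when false). */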
  2626. #ifdef __LITTLE_ENDIAN__
  2627. __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  2628. uint8x16_t __ret;
  2629. __ret = (uint8x16_t)(__p0 > __p1);
  2630. return __ret;
  2631. }
  2632. #else
  2633. __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  2634. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2635. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2636. uint8x16_t __ret;
  2637. __ret = (uint8x16_t)(__rev0 > __rev1);
  2638. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2639. return __ret;
  2640. }
  2641. #endif
  2642. #ifdef __LITTLE_ENDIAN__
  2643. __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  2644. uint32x4_t __ret;
  2645. __ret = (uint32x4_t)(__p0 > __p1);
  2646. return __ret;
  2647. }
  2648. #else
  2649. __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  2650. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2651. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2652. uint32x4_t __ret;
  2653. __ret = (uint32x4_t)(__rev0 > __rev1);
  2654. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2655. return __ret;
  2656. }
  2657. #endif
  2658. #ifdef __LITTLE_ENDIAN__
  2659. __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  2660. uint16x8_t __ret;
  2661. __ret = (uint16x8_t)(__p0 > __p1);
  2662. return __ret;
  2663. }
  2664. #else
  2665. __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  2666. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2667. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2668. uint16x8_t __ret;
  2669. __ret = (uint16x8_t)(__rev0 > __rev1);
  2670. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2671. return __ret;
  2672. }
  2673. #endif
  2674. #ifdef __LITTLE_ENDIAN__
  2675. __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
  2676. uint8x16_t __ret;
  2677. __ret = (uint8x16_t)(__p0 > __p1);
  2678. return __ret;
  2679. }
  2680. #else
  2681. __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
  2682. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2683. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2684. uint8x16_t __ret;
  2685. __ret = (uint8x16_t)(__rev0 > __rev1);
  2686. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2687. return __ret;
  2688. }
  2689. #endif
  2690. #ifdef __LITTLE_ENDIAN__
  2691. __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
  2692. uint32x4_t __ret;
  2693. __ret = (uint32x4_t)(__p0 > __p1);
  2694. return __ret;
  2695. }
  2696. #else
  2697. __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
  2698. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2699. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2700. uint32x4_t __ret;
  2701. __ret = (uint32x4_t)(__rev0 > __rev1);
  2702. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2703. return __ret;
  2704. }
  2705. #endif
  2706. #ifdef __LITTLE_ENDIAN__
  2707. __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
  2708. uint32x4_t __ret;
  2709. __ret = (uint32x4_t)(__p0 > __p1);
  2710. return __ret;
  2711. }
  2712. #else
  2713. __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
  2714. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2715. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2716. uint32x4_t __ret;
  2717. __ret = (uint32x4_t)(__rev0 > __rev1);
  2718. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2719. return __ret;
  2720. }
  2721. #endif
  2722. #ifdef __LITTLE_ENDIAN__
  2723. __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
  2724. uint16x8_t __ret;
  2725. __ret = (uint16x8_t)(__p0 > __p1);
  2726. return __ret;
  2727. }
  2728. #else
  2729. __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
  2730. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2731. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2732. uint16x8_t __ret;
  2733. __ret = (uint16x8_t)(__rev0 > __rev1);
  2734. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2735. return __ret;
  2736. }
  2737. #endif
  2738. #ifdef __LITTLE_ENDIAN__
  2739. __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
  2740. uint8x8_t __ret;
  2741. __ret = (uint8x8_t)(__p0 > __p1);
  2742. return __ret;
  2743. }
  2744. #else
  2745. __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
  2746. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2747. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2748. uint8x8_t __ret;
  2749. __ret = (uint8x8_t)(__rev0 > __rev1);
  2750. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2751. return __ret;
  2752. }
  2753. #endif
  2754. #ifdef __LITTLE_ENDIAN__
  2755. __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
  2756. uint32x2_t __ret;
  2757. __ret = (uint32x2_t)(__p0 > __p1);
  2758. return __ret;
  2759. }
  2760. #else
  2761. __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
  2762. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2763. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2764. uint32x2_t __ret;
  2765. __ret = (uint32x2_t)(__rev0 > __rev1);
  2766. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2767. return __ret;
  2768. }
  2769. #endif
  2770. #ifdef __LITTLE_ENDIAN__
  2771. __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
  2772. uint16x4_t __ret;
  2773. __ret = (uint16x4_t)(__p0 > __p1);
  2774. return __ret;
  2775. }
  2776. #else
  2777. __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
  2778. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2779. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2780. uint16x4_t __ret;
  2781. __ret = (uint16x4_t)(__rev0 > __rev1);
  2782. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2783. return __ret;
  2784. }
  2785. #endif
  2786. #ifdef __LITTLE_ENDIAN__
  2787. __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
  2788. uint8x8_t __ret;
  2789. __ret = (uint8x8_t)(__p0 > __p1);
  2790. return __ret;
  2791. }
  2792. #else
  2793. __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
  2794. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2795. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2796. uint8x8_t __ret;
  2797. __ret = (uint8x8_t)(__rev0 > __rev1);
  2798. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2799. return __ret;
  2800. }
  2801. #endif
  2802. #ifdef __LITTLE_ENDIAN__
  2803. __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
  2804. uint32x2_t __ret;
  2805. __ret = (uint32x2_t)(__p0 > __p1);
  2806. return __ret;
  2807. }
  2808. #else
  2809. __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
  2810. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2811. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2812. uint32x2_t __ret;
  2813. __ret = (uint32x2_t)(__rev0 > __rev1);
  2814. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2815. return __ret;
  2816. }
  2817. #endif
  2818. #ifdef __LITTLE_ENDIAN__
  2819. __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
  2820. uint32x2_t __ret;
  2821. __ret = (uint32x2_t)(__p0 > __p1);
  2822. return __ret;
  2823. }
  2824. #else
  2825. __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
  2826. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2827. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2828. uint32x2_t __ret;
  2829. __ret = (uint32x2_t)(__rev0 > __rev1);
  2830. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2831. return __ret;
  2832. }
  2833. #endif
  2834. #ifdef __LITTLE_ENDIAN__
  2835. __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
  2836. uint16x4_t __ret;
  2837. __ret = (uint16x4_t)(__p0 > __p1);
  2838. return __ret;
  2839. }
  2840. #else
  2841. __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
  2842. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2843. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2844. uint16x4_t __ret;
  2845. __ret = (uint16x4_t)(__rev0 > __rev1);
  2846. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2847. return __ret;
  2848. }
  2849. #endif
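/* vcleq_* and vcle_*: lane-wise "less than or equal" comparisons. */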
  2850. #ifdef __LITTLE_ENDIAN__
  2851. __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  2852. uint8x16_t __ret;
  2853. __ret = (uint8x16_t)(__p0 <= __p1);
  2854. return __ret;
  2855. }
  2856. #else
  2857. __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  2858. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2859. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2860. uint8x16_t __ret;
  2861. __ret = (uint8x16_t)(__rev0 <= __rev1);
  2862. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2863. return __ret;
  2864. }
  2865. #endif
  2866. #ifdef __LITTLE_ENDIAN__
  2867. __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  2868. uint32x4_t __ret;
  2869. __ret = (uint32x4_t)(__p0 <= __p1);
  2870. return __ret;
  2871. }
  2872. #else
  2873. __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  2874. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2875. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2876. uint32x4_t __ret;
  2877. __ret = (uint32x4_t)(__rev0 <= __rev1);
  2878. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2879. return __ret;
  2880. }
  2881. #endif
  2882. #ifdef __LITTLE_ENDIAN__
  2883. __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  2884. uint16x8_t __ret;
  2885. __ret = (uint16x8_t)(__p0 <= __p1);
  2886. return __ret;
  2887. }
  2888. #else
  2889. __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  2890. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2891. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2892. uint16x8_t __ret;
  2893. __ret = (uint16x8_t)(__rev0 <= __rev1);
  2894. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2895. return __ret;
  2896. }
  2897. #endif
  2898. #ifdef __LITTLE_ENDIAN__
  2899. __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
  2900. uint8x16_t __ret;
  2901. __ret = (uint8x16_t)(__p0 <= __p1);
  2902. return __ret;
  2903. }
  2904. #else
  2905. __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
  2906. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2907. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2908. uint8x16_t __ret;
  2909. __ret = (uint8x16_t)(__rev0 <= __rev1);
  2910. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  2911. return __ret;
  2912. }
  2913. #endif
  2914. #ifdef __LITTLE_ENDIAN__
  2915. __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
  2916. uint32x4_t __ret;
  2917. __ret = (uint32x4_t)(__p0 <= __p1);
  2918. return __ret;
  2919. }
  2920. #else
  2921. __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
  2922. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2923. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2924. uint32x4_t __ret;
  2925. __ret = (uint32x4_t)(__rev0 <= __rev1);
  2926. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2927. return __ret;
  2928. }
  2929. #endif
  2930. #ifdef __LITTLE_ENDIAN__
  2931. __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
  2932. uint32x4_t __ret;
  2933. __ret = (uint32x4_t)(__p0 <= __p1);
  2934. return __ret;
  2935. }
  2936. #else
  2937. __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
  2938. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  2939. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  2940. uint32x4_t __ret;
  2941. __ret = (uint32x4_t)(__rev0 <= __rev1);
  2942. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  2943. return __ret;
  2944. }
  2945. #endif
  2946. #ifdef __LITTLE_ENDIAN__
  2947. __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
  2948. uint16x8_t __ret;
  2949. __ret = (uint16x8_t)(__p0 <= __p1);
  2950. return __ret;
  2951. }
  2952. #else
  2953. __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
  2954. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2955. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2956. uint16x8_t __ret;
  2957. __ret = (uint16x8_t)(__rev0 <= __rev1);
  2958. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2959. return __ret;
  2960. }
  2961. #endif
  2962. #ifdef __LITTLE_ENDIAN__
  2963. __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
  2964. uint8x8_t __ret;
  2965. __ret = (uint8x8_t)(__p0 <= __p1);
  2966. return __ret;
  2967. }
  2968. #else
  2969. __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
  2970. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  2971. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  2972. uint8x8_t __ret;
  2973. __ret = (uint8x8_t)(__rev0 <= __rev1);
  2974. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  2975. return __ret;
  2976. }
  2977. #endif
  2978. #ifdef __LITTLE_ENDIAN__
  2979. __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
  2980. uint32x2_t __ret;
  2981. __ret = (uint32x2_t)(__p0 <= __p1);
  2982. return __ret;
  2983. }
  2984. #else
  2985. __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
  2986. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  2987. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  2988. uint32x2_t __ret;
  2989. __ret = (uint32x2_t)(__rev0 <= __rev1);
  2990. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  2991. return __ret;
  2992. }
  2993. #endif
  2994. #ifdef __LITTLE_ENDIAN__
  2995. __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
  2996. uint16x4_t __ret;
  2997. __ret = (uint16x4_t)(__p0 <= __p1);
  2998. return __ret;
  2999. }
  3000. #else
  3001. __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
  3002. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3003. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3004. uint16x4_t __ret;
  3005. __ret = (uint16x4_t)(__rev0 <= __rev1);
  3006. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3007. return __ret;
  3008. }
  3009. #endif
  3010. #ifdef __LITTLE_ENDIAN__
  3011. __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
  3012. uint8x8_t __ret;
  3013. __ret = (uint8x8_t)(__p0 <= __p1);
  3014. return __ret;
  3015. }
  3016. #else
  3017. __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
  3018. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3019. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  3020. uint8x8_t __ret;
  3021. __ret = (uint8x8_t)(__rev0 <= __rev1);
  3022. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3023. return __ret;
  3024. }
  3025. #endif
  3026. #ifdef __LITTLE_ENDIAN__
  3027. __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
  3028. uint32x2_t __ret;
  3029. __ret = (uint32x2_t)(__p0 <= __p1);
  3030. return __ret;
  3031. }
  3032. #else
  3033. __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
  3034. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3035. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  3036. uint32x2_t __ret;
  3037. __ret = (uint32x2_t)(__rev0 <= __rev1);
  3038. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3039. return __ret;
  3040. }
  3041. #endif
  3042. #ifdef __LITTLE_ENDIAN__
  3043. __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
  3044. uint32x2_t __ret;
  3045. __ret = (uint32x2_t)(__p0 <= __p1);
  3046. return __ret;
  3047. }
  3048. #else
  3049. __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
  3050. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3051. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  3052. uint32x2_t __ret;
  3053. __ret = (uint32x2_t)(__rev0 <= __rev1);
  3054. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3055. return __ret;
  3056. }
  3057. #endif
  3058. #ifdef __LITTLE_ENDIAN__
  3059. __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
  3060. uint16x4_t __ret;
  3061. __ret = (uint16x4_t)(__p0 <= __p1);
  3062. return __ret;
  3063. }
  3064. #else
  3065. __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
  3066. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3067. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3068. uint16x4_t __ret;
  3069. __ret = (uint16x4_t)(__rev0 <= __rev1);
  3070. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3071. return __ret;
  3072. }
  3073. #endif
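/* vclsq_* and vcls_*: count leading sign bits, i.e. for each signed lane the
   number of bits below the sign bit that are equal to it. These lower to
   __builtin_neon_vclsq_v and __builtin_neon_vcls_v; the trailing integer
   argument (0, 1, 2, 32, 33, 34, ...) is an internal Clang type code that
   identifies the element type and vector width, not a value user code should
   rely on. */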
  3074. #ifdef __LITTLE_ENDIAN__
  3075. __ai int8x16_t vclsq_s8(int8x16_t __p0) {
  3076. int8x16_t __ret;
  3077. __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
  3078. return __ret;
  3079. }
  3080. #else
  3081. __ai int8x16_t vclsq_s8(int8x16_t __p0) {
  3082. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3083. int8x16_t __ret;
  3084. __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
  3085. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3086. return __ret;
  3087. }
  3088. #endif
  3089. #ifdef __LITTLE_ENDIAN__
  3090. __ai int32x4_t vclsq_s32(int32x4_t __p0) {
  3091. int32x4_t __ret;
  3092. __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
  3093. return __ret;
  3094. }
  3095. #else
  3096. __ai int32x4_t vclsq_s32(int32x4_t __p0) {
  3097. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3098. int32x4_t __ret;
  3099. __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
  3100. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3101. return __ret;
  3102. }
  3103. #endif
  3104. #ifdef __LITTLE_ENDIAN__
  3105. __ai int16x8_t vclsq_s16(int16x8_t __p0) {
  3106. int16x8_t __ret;
  3107. __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
  3108. return __ret;
  3109. }
  3110. #else
  3111. __ai int16x8_t vclsq_s16(int16x8_t __p0) {
  3112. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3113. int16x8_t __ret;
  3114. __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
  3115. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3116. return __ret;
  3117. }
  3118. #endif
  3119. #ifdef __LITTLE_ENDIAN__
  3120. __ai int8x8_t vcls_s8(int8x8_t __p0) {
  3121. int8x8_t __ret;
  3122. __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
  3123. return __ret;
  3124. }
  3125. #else
  3126. __ai int8x8_t vcls_s8(int8x8_t __p0) {
  3127. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3128. int8x8_t __ret;
  3129. __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
  3130. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3131. return __ret;
  3132. }
  3133. #endif
  3134. #ifdef __LITTLE_ENDIAN__
  3135. __ai int32x2_t vcls_s32(int32x2_t __p0) {
  3136. int32x2_t __ret;
  3137. __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
  3138. return __ret;
  3139. }
  3140. #else
  3141. __ai int32x2_t vcls_s32(int32x2_t __p0) {
  3142. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3143. int32x2_t __ret;
  3144. __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
  3145. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3146. return __ret;
  3147. }
  3148. #endif
  3149. #ifdef __LITTLE_ENDIAN__
  3150. __ai int16x4_t vcls_s16(int16x4_t __p0) {
  3151. int16x4_t __ret;
  3152. __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
  3153. return __ret;
  3154. }
  3155. #else
  3156. __ai int16x4_t vcls_s16(int16x4_t __p0) {
  3157. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3158. int16x4_t __ret;
  3159. __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
  3160. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3161. return __ret;
  3162. }
  3163. #endif
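/* vcltq_* and vclt_*: lane-wise "less than" comparisons. */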
  3164. #ifdef __LITTLE_ENDIAN__
  3165. __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  3166. uint8x16_t __ret;
  3167. __ret = (uint8x16_t)(__p0 < __p1);
  3168. return __ret;
  3169. }
  3170. #else
  3171. __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  3172. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3173. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3174. uint8x16_t __ret;
  3175. __ret = (uint8x16_t)(__rev0 < __rev1);
  3176. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3177. return __ret;
  3178. }
  3179. #endif
  3180. #ifdef __LITTLE_ENDIAN__
  3181. __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  3182. uint32x4_t __ret;
  3183. __ret = (uint32x4_t)(__p0 < __p1);
  3184. return __ret;
  3185. }
  3186. #else
  3187. __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  3188. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3189. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3190. uint32x4_t __ret;
  3191. __ret = (uint32x4_t)(__rev0 < __rev1);
  3192. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3193. return __ret;
  3194. }
  3195. #endif
  3196. #ifdef __LITTLE_ENDIAN__
  3197. __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  3198. uint16x8_t __ret;
  3199. __ret = (uint16x8_t)(__p0 < __p1);
  3200. return __ret;
  3201. }
  3202. #else
  3203. __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  3204. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3205. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  3206. uint16x8_t __ret;
  3207. __ret = (uint16x8_t)(__rev0 < __rev1);
  3208. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3209. return __ret;
  3210. }
  3211. #endif
  3212. #ifdef __LITTLE_ENDIAN__
  3213. __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
  3214. uint8x16_t __ret;
  3215. __ret = (uint8x16_t)(__p0 < __p1);
  3216. return __ret;
  3217. }
  3218. #else
  3219. __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
  3220. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3221. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3222. uint8x16_t __ret;
  3223. __ret = (uint8x16_t)(__rev0 < __rev1);
  3224. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3225. return __ret;
  3226. }
  3227. #endif
  3228. #ifdef __LITTLE_ENDIAN__
  3229. __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
  3230. uint32x4_t __ret;
  3231. __ret = (uint32x4_t)(__p0 < __p1);
  3232. return __ret;
  3233. }
  3234. #else
  3235. __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
  3236. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3237. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3238. uint32x4_t __ret;
  3239. __ret = (uint32x4_t)(__rev0 < __rev1);
  3240. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3241. return __ret;
  3242. }
  3243. #endif
  3244. #ifdef __LITTLE_ENDIAN__
  3245. __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
  3246. uint32x4_t __ret;
  3247. __ret = (uint32x4_t)(__p0 < __p1);
  3248. return __ret;
  3249. }
  3250. #else
  3251. __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
  3252. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3253. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3254. uint32x4_t __ret;
  3255. __ret = (uint32x4_t)(__rev0 < __rev1);
  3256. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3257. return __ret;
  3258. }
  3259. #endif
  3260. #ifdef __LITTLE_ENDIAN__
  3261. __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
  3262. uint16x8_t __ret;
  3263. __ret = (uint16x8_t)(__p0 < __p1);
  3264. return __ret;
  3265. }
  3266. #else
  3267. __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
  3268. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3269. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  3270. uint16x8_t __ret;
  3271. __ret = (uint16x8_t)(__rev0 < __rev1);
  3272. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3273. return __ret;
  3274. }
  3275. #endif
  3276. #ifdef __LITTLE_ENDIAN__
  3277. __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
  3278. uint8x8_t __ret;
  3279. __ret = (uint8x8_t)(__p0 < __p1);
  3280. return __ret;
  3281. }
  3282. #else
  3283. __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
  3284. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3285. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  3286. uint8x8_t __ret;
  3287. __ret = (uint8x8_t)(__rev0 < __rev1);
  3288. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3289. return __ret;
  3290. }
  3291. #endif
  3292. #ifdef __LITTLE_ENDIAN__
  3293. __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
  3294. uint32x2_t __ret;
  3295. __ret = (uint32x2_t)(__p0 < __p1);
  3296. return __ret;
  3297. }
  3298. #else
  3299. __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
  3300. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3301. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  3302. uint32x2_t __ret;
  3303. __ret = (uint32x2_t)(__rev0 < __rev1);
  3304. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3305. return __ret;
  3306. }
  3307. #endif
  3308. #ifdef __LITTLE_ENDIAN__
  3309. __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
  3310. uint16x4_t __ret;
  3311. __ret = (uint16x4_t)(__p0 < __p1);
  3312. return __ret;
  3313. }
  3314. #else
  3315. __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
  3316. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3317. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3318. uint16x4_t __ret;
  3319. __ret = (uint16x4_t)(__rev0 < __rev1);
  3320. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3321. return __ret;
  3322. }
  3323. #endif
  3324. #ifdef __LITTLE_ENDIAN__
  3325. __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
  3326. uint8x8_t __ret;
  3327. __ret = (uint8x8_t)(__p0 < __p1);
  3328. return __ret;
  3329. }
  3330. #else
  3331. __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
  3332. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3333. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  3334. uint8x8_t __ret;
  3335. __ret = (uint8x8_t)(__rev0 < __rev1);
  3336. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3337. return __ret;
  3338. }
  3339. #endif
  3340. #ifdef __LITTLE_ENDIAN__
  3341. __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
  3342. uint32x2_t __ret;
  3343. __ret = (uint32x2_t)(__p0 < __p1);
  3344. return __ret;
  3345. }
  3346. #else
  3347. __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
  3348. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3349. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  3350. uint32x2_t __ret;
  3351. __ret = (uint32x2_t)(__rev0 < __rev1);
  3352. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3353. return __ret;
  3354. }
  3355. #endif
  3356. #ifdef __LITTLE_ENDIAN__
  3357. __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
  3358. uint32x2_t __ret;
  3359. __ret = (uint32x2_t)(__p0 < __p1);
  3360. return __ret;
  3361. }
  3362. #else
  3363. __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
  3364. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3365. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  3366. uint32x2_t __ret;
  3367. __ret = (uint32x2_t)(__rev0 < __rev1);
  3368. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3369. return __ret;
  3370. }
  3371. #endif
  3372. #ifdef __LITTLE_ENDIAN__
  3373. __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
  3374. uint16x4_t __ret;
  3375. __ret = (uint16x4_t)(__p0 < __p1);
  3376. return __ret;
  3377. }
  3378. #else
  3379. __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
  3380. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3381. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3382. uint16x4_t __ret;
  3383. __ret = (uint16x4_t)(__rev0 < __rev1);
  3384. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3385. return __ret;
  3386. }
  3387. #endif
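/* vclzq_* and vclz_*: count leading zero bits in each lane; defined for both
   signed and unsigned integer element types. */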
  3388. #ifdef __LITTLE_ENDIAN__
  3389. __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
  3390. uint8x16_t __ret;
  3391. __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
  3392. return __ret;
  3393. }
  3394. #else
  3395. __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
  3396. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3397. uint8x16_t __ret;
  3398. __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
  3399. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3400. return __ret;
  3401. }
  3402. #endif
  3403. #ifdef __LITTLE_ENDIAN__
  3404. __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
  3405. uint32x4_t __ret;
  3406. __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
  3407. return __ret;
  3408. }
  3409. #else
  3410. __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
  3411. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3412. uint32x4_t __ret;
  3413. __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
  3414. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3415. return __ret;
  3416. }
  3417. #endif
  3418. #ifdef __LITTLE_ENDIAN__
  3419. __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
  3420. uint16x8_t __ret;
  3421. __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
  3422. return __ret;
  3423. }
  3424. #else
  3425. __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
  3426. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3427. uint16x8_t __ret;
  3428. __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
  3429. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3430. return __ret;
  3431. }
  3432. #endif
  3433. #ifdef __LITTLE_ENDIAN__
  3434. __ai int8x16_t vclzq_s8(int8x16_t __p0) {
  3435. int8x16_t __ret;
  3436. __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
  3437. return __ret;
  3438. }
  3439. #else
  3440. __ai int8x16_t vclzq_s8(int8x16_t __p0) {
  3441. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3442. int8x16_t __ret;
  3443. __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
  3444. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3445. return __ret;
  3446. }
  3447. #endif
  3448. #ifdef __LITTLE_ENDIAN__
  3449. __ai int32x4_t vclzq_s32(int32x4_t __p0) {
  3450. int32x4_t __ret;
  3451. __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
  3452. return __ret;
  3453. }
  3454. #else
  3455. __ai int32x4_t vclzq_s32(int32x4_t __p0) {
  3456. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3457. int32x4_t __ret;
  3458. __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
  3459. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3460. return __ret;
  3461. }
  3462. #endif
  3463. #ifdef __LITTLE_ENDIAN__
  3464. __ai int16x8_t vclzq_s16(int16x8_t __p0) {
  3465. int16x8_t __ret;
  3466. __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
  3467. return __ret;
  3468. }
  3469. #else
  3470. __ai int16x8_t vclzq_s16(int16x8_t __p0) {
  3471. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3472. int16x8_t __ret;
  3473. __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
  3474. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3475. return __ret;
  3476. }
  3477. #endif
  3478. #ifdef __LITTLE_ENDIAN__
  3479. __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
  3480. uint8x8_t __ret;
  3481. __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
  3482. return __ret;
  3483. }
  3484. #else
  3485. __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
  3486. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3487. uint8x8_t __ret;
  3488. __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
  3489. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3490. return __ret;
  3491. }
  3492. #endif
  3493. #ifdef __LITTLE_ENDIAN__
  3494. __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
  3495. uint32x2_t __ret;
  3496. __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
  3497. return __ret;
  3498. }
  3499. #else
  3500. __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
  3501. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3502. uint32x2_t __ret;
  3503. __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
  3504. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3505. return __ret;
  3506. }
  3507. #endif
  3508. #ifdef __LITTLE_ENDIAN__
  3509. __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
  3510. uint16x4_t __ret;
  3511. __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
  3512. return __ret;
  3513. }
  3514. #else
  3515. __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
  3516. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3517. uint16x4_t __ret;
  3518. __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
  3519. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3520. return __ret;
  3521. }
  3522. #endif
  3523. #ifdef __LITTLE_ENDIAN__
  3524. __ai int8x8_t vclz_s8(int8x8_t __p0) {
  3525. int8x8_t __ret;
  3526. __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
  3527. return __ret;
  3528. }
  3529. #else
  3530. __ai int8x8_t vclz_s8(int8x8_t __p0) {
  3531. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3532. int8x8_t __ret;
  3533. __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
  3534. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3535. return __ret;
  3536. }
  3537. #endif
  3538. #ifdef __LITTLE_ENDIAN__
  3539. __ai int32x2_t vclz_s32(int32x2_t __p0) {
  3540. int32x2_t __ret;
  3541. __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
  3542. return __ret;
  3543. }
  3544. #else
  3545. __ai int32x2_t vclz_s32(int32x2_t __p0) {
  3546. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3547. int32x2_t __ret;
  3548. __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
  3549. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3550. return __ret;
  3551. }
  3552. #endif
  3553. #ifdef __LITTLE_ENDIAN__
  3554. __ai int16x4_t vclz_s16(int16x4_t __p0) {
  3555. int16x4_t __ret;
  3556. __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
  3557. return __ret;
  3558. }
  3559. #else
  3560. __ai int16x4_t vclz_s16(int16x4_t __p0) {
  3561. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3562. int16x4_t __ret;
  3563. __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
  3564. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3565. return __ret;
  3566. }
  3567. #endif
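/* vcntq_* and vcnt_*: population count, i.e. the number of set bits in each
   8-bit lane; defined for poly8, uint8 and int8 vectors. */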
  3568. #ifdef __LITTLE_ENDIAN__
  3569. __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
  3570. poly8x8_t __ret;
  3571. __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
  3572. return __ret;
  3573. }
  3574. #else
  3575. __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
  3576. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3577. poly8x8_t __ret;
  3578. __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
  3579. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3580. return __ret;
  3581. }
  3582. #endif
  3583. #ifdef __LITTLE_ENDIAN__
  3584. __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
  3585. poly8x16_t __ret;
  3586. __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
  3587. return __ret;
  3588. }
  3589. #else
  3590. __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
  3591. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3592. poly8x16_t __ret;
  3593. __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
  3594. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3595. return __ret;
  3596. }
  3597. #endif
  3598. #ifdef __LITTLE_ENDIAN__
  3599. __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
  3600. uint8x16_t __ret;
  3601. __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
  3602. return __ret;
  3603. }
  3604. #else
  3605. __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
  3606. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3607. uint8x16_t __ret;
  3608. __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
  3609. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3610. return __ret;
  3611. }
  3612. #endif
  3613. #ifdef __LITTLE_ENDIAN__
  3614. __ai int8x16_t vcntq_s8(int8x16_t __p0) {
  3615. int8x16_t __ret;
  3616. __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
  3617. return __ret;
  3618. }
  3619. #else
  3620. __ai int8x16_t vcntq_s8(int8x16_t __p0) {
  3621. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3622. int8x16_t __ret;
  3623. __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
  3624. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3625. return __ret;
  3626. }
  3627. #endif
  3628. #ifdef __LITTLE_ENDIAN__
  3629. __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
  3630. uint8x8_t __ret;
  3631. __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
  3632. return __ret;
  3633. }
  3634. #else
  3635. __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
  3636. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3637. uint8x8_t __ret;
  3638. __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
  3639. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3640. return __ret;
  3641. }
  3642. #endif
  3643. #ifdef __LITTLE_ENDIAN__
  3644. __ai int8x8_t vcnt_s8(int8x8_t __p0) {
  3645. int8x8_t __ret;
  3646. __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
  3647. return __ret;
  3648. }
  3649. #else
  3650. __ai int8x8_t vcnt_s8(int8x8_t __p0) {
  3651. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3652. int8x8_t __ret;
  3653. __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
  3654. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3655. return __ret;
  3656. }
  3657. #endif
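/* vcombine_*: concatenate two 64-bit vectors into one 128-bit vector, with the
   first argument in the low half and the second in the high half. For example,
   vcombine_u8(lo, hi) yields { lo[0]..lo[7], hi[0]..hi[7] }. The additional
   __noswap_vcombine_* helpers, defined only in the big-endian branch, skip the
   lane-reversal step; they are used by other big-endian intrinsic wrappers in
   this header that already hold their operands in reversed lane order. */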
  3658. #ifdef __LITTLE_ENDIAN__
  3659. __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
  3660. poly8x16_t __ret;
  3661. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  3662. return __ret;
  3663. }
  3664. #else
  3665. __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
  3666. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3667. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  3668. poly8x16_t __ret;
  3669. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  3670. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3671. return __ret;
  3672. }
  3673. #endif
  3674. #ifdef __LITTLE_ENDIAN__
  3675. __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
  3676. poly16x8_t __ret;
  3677. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  3678. return __ret;
  3679. }
  3680. #else
  3681. __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
  3682. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3683. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3684. poly16x8_t __ret;
  3685. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
  3686. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3687. return __ret;
  3688. }
  3689. #endif
  3690. #ifdef __LITTLE_ENDIAN__
  3691. __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
  3692. uint8x16_t __ret;
  3693. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  3694. return __ret;
  3695. }
  3696. #else
  3697. __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
  3698. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3699. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  3700. uint8x16_t __ret;
  3701. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  3702. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3703. return __ret;
  3704. }
  3705. __ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
  3706. uint8x16_t __ret;
  3707. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  3708. return __ret;
  3709. }
  3710. #endif
  3711. #ifdef __LITTLE_ENDIAN__
  3712. __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
  3713. uint32x4_t __ret;
  3714. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
  3715. return __ret;
  3716. }
  3717. #else
  3718. __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
  3719. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3720. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  3721. uint32x4_t __ret;
  3722. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
  3723. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3724. return __ret;
  3725. }
  3726. __ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
  3727. uint32x4_t __ret;
  3728. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
  3729. return __ret;
  3730. }
  3731. #endif
  3732. #ifdef __LITTLE_ENDIAN__
  3733. __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
  3734. uint64x2_t __ret;
  3735. __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
  3736. return __ret;
  3737. }
  3738. #else
  3739. __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
  3740. uint64x2_t __ret;
  3741. __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
  3742. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3743. return __ret;
  3744. }
  3745. #endif
  3746. #ifdef __LITTLE_ENDIAN__
  3747. __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
  3748. uint16x8_t __ret;
  3749. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  3750. return __ret;
  3751. }
  3752. #else
  3753. __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
  3754. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3755. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3756. uint16x8_t __ret;
  3757. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
  3758. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3759. return __ret;
  3760. }
  3761. __ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
  3762. uint16x8_t __ret;
  3763. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  3764. return __ret;
  3765. }
  3766. #endif
  3767. #ifdef __LITTLE_ENDIAN__
  3768. __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
  3769. int8x16_t __ret;
  3770. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  3771. return __ret;
  3772. }
  3773. #else
  3774. __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
  3775. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  3776. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  3777. int8x16_t __ret;
  3778. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  3779. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  3780. return __ret;
  3781. }
  3782. __ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
  3783. int8x16_t __ret;
  3784. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  3785. return __ret;
  3786. }
  3787. #endif
  3788. #ifdef __LITTLE_ENDIAN__
  3789. __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
  3790. float32x4_t __ret;
  3791. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
  3792. return __ret;
  3793. }
  3794. #else
  3795. __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
  3796. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3797. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  3798. float32x4_t __ret;
  3799. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
  3800. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3801. return __ret;
  3802. }
  3803. __ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
  3804. float32x4_t __ret;
  3805. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
  3806. return __ret;
  3807. }
  3808. #endif
  3809. #ifdef __LITTLE_ENDIAN__
  3810. __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
  3811. float16x8_t __ret;
  3812. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  3813. return __ret;
  3814. }
  3815. #else
  3816. __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
  3817. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3818. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3819. float16x8_t __ret;
  3820. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
  3821. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3822. return __ret;
  3823. }
  3824. __ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
  3825. float16x8_t __ret;
  3826. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  3827. return __ret;
  3828. }
  3829. #endif
  3830. #ifdef __LITTLE_ENDIAN__
  3831. __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
  3832. int32x4_t __ret;
  3833. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
  3834. return __ret;
  3835. }
  3836. #else
  3837. __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
  3838. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  3839. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  3840. int32x4_t __ret;
  3841. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
  3842. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  3843. return __ret;
  3844. }
  3845. __ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
  3846. int32x4_t __ret;
  3847. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
  3848. return __ret;
  3849. }
  3850. #endif
  3851. #ifdef __LITTLE_ENDIAN__
  3852. __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
  3853. int64x2_t __ret;
  3854. __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
  3855. return __ret;
  3856. }
  3857. #else
  3858. __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
  3859. int64x2_t __ret;
  3860. __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
  3861. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  3862. return __ret;
  3863. }
  3864. #endif
  3865. #ifdef __LITTLE_ENDIAN__
  3866. __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
  3867. int16x8_t __ret;
  3868. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  3869. return __ret;
  3870. }
  3871. #else
  3872. __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
  3873. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  3874. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  3875. int16x8_t __ret;
  3876. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
  3877. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  3878. return __ret;
  3879. }
  3880. __ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
  3881. int16x8_t __ret;
  3882. __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
  3883. return __ret;
  3884. }
  3885. #endif
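/*
 * vcreate_<type>(uint64_t): reinterpret the 64 bits of the argument as a
 * 64-bit NEON vector; element 0 is taken from the least-significant bits.
 * This is a plain bit-cast, e.g. (illustrative)
 *   uint8x8_t v = vcreate_u8(0x0706050403020100ULL);  // lanes {0,1,...,7}
 */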
  3886. #ifdef __LITTLE_ENDIAN__
  3887. __ai poly8x8_t vcreate_p8(uint64_t __p0) {
  3888. poly8x8_t __ret;
  3889. __ret = (poly8x8_t)(__p0);
  3890. return __ret;
  3891. }
  3892. #else
  3893. __ai poly8x8_t vcreate_p8(uint64_t __p0) {
  3894. poly8x8_t __ret;
  3895. __ret = (poly8x8_t)(__p0);
  3896. return __ret;
  3897. }
  3898. #endif
  3899. #ifdef __LITTLE_ENDIAN__
  3900. __ai poly16x4_t vcreate_p16(uint64_t __p0) {
  3901. poly16x4_t __ret;
  3902. __ret = (poly16x4_t)(__p0);
  3903. return __ret;
  3904. }
  3905. #else
  3906. __ai poly16x4_t vcreate_p16(uint64_t __p0) {
  3907. poly16x4_t __ret;
  3908. __ret = (poly16x4_t)(__p0);
  3909. return __ret;
  3910. }
  3911. #endif
  3912. #ifdef __LITTLE_ENDIAN__
  3913. __ai uint8x8_t vcreate_u8(uint64_t __p0) {
  3914. uint8x8_t __ret;
  3915. __ret = (uint8x8_t)(__p0);
  3916. return __ret;
  3917. }
  3918. #else
  3919. __ai uint8x8_t vcreate_u8(uint64_t __p0) {
  3920. uint8x8_t __ret;
  3921. __ret = (uint8x8_t)(__p0);
  3922. return __ret;
  3923. }
  3924. #endif
  3925. #ifdef __LITTLE_ENDIAN__
  3926. __ai uint32x2_t vcreate_u32(uint64_t __p0) {
  3927. uint32x2_t __ret;
  3928. __ret = (uint32x2_t)(__p0);
  3929. return __ret;
  3930. }
  3931. #else
  3932. __ai uint32x2_t vcreate_u32(uint64_t __p0) {
  3933. uint32x2_t __ret;
  3934. __ret = (uint32x2_t)(__p0);
  3935. return __ret;
  3936. }
  3937. #endif
  3938. #ifdef __LITTLE_ENDIAN__
  3939. __ai uint64x1_t vcreate_u64(uint64_t __p0) {
  3940. uint64x1_t __ret;
  3941. __ret = (uint64x1_t)(__p0);
  3942. return __ret;
  3943. }
  3944. #else
  3945. __ai uint64x1_t vcreate_u64(uint64_t __p0) {
  3946. uint64x1_t __ret;
  3947. __ret = (uint64x1_t)(__p0);
  3948. return __ret;
  3949. }
  3950. #endif
  3951. #ifdef __LITTLE_ENDIAN__
  3952. __ai uint16x4_t vcreate_u16(uint64_t __p0) {
  3953. uint16x4_t __ret;
  3954. __ret = (uint16x4_t)(__p0);
  3955. return __ret;
  3956. }
  3957. #else
  3958. __ai uint16x4_t vcreate_u16(uint64_t __p0) {
  3959. uint16x4_t __ret;
  3960. __ret = (uint16x4_t)(__p0);
  3961. return __ret;
  3962. }
  3963. #endif
  3964. #ifdef __LITTLE_ENDIAN__
  3965. __ai int8x8_t vcreate_s8(uint64_t __p0) {
  3966. int8x8_t __ret;
  3967. __ret = (int8x8_t)(__p0);
  3968. return __ret;
  3969. }
  3970. #else
  3971. __ai int8x8_t vcreate_s8(uint64_t __p0) {
  3972. int8x8_t __ret;
  3973. __ret = (int8x8_t)(__p0);
  3974. return __ret;
  3975. }
  3976. #endif
  3977. #ifdef __LITTLE_ENDIAN__
  3978. __ai float32x2_t vcreate_f32(uint64_t __p0) {
  3979. float32x2_t __ret;
  3980. __ret = (float32x2_t)(__p0);
  3981. return __ret;
  3982. }
  3983. #else
  3984. __ai float32x2_t vcreate_f32(uint64_t __p0) {
  3985. float32x2_t __ret;
  3986. __ret = (float32x2_t)(__p0);
  3987. return __ret;
  3988. }
  3989. #endif
  3990. #ifdef __LITTLE_ENDIAN__
  3991. __ai float16x4_t vcreate_f16(uint64_t __p0) {
  3992. float16x4_t __ret;
  3993. __ret = (float16x4_t)(__p0);
  3994. return __ret;
  3995. }
  3996. #else
  3997. __ai float16x4_t vcreate_f16(uint64_t __p0) {
  3998. float16x4_t __ret;
  3999. __ret = (float16x4_t)(__p0);
  4000. return __ret;
  4001. }
  4002. #endif
  4003. #ifdef __LITTLE_ENDIAN__
  4004. __ai int32x2_t vcreate_s32(uint64_t __p0) {
  4005. int32x2_t __ret;
  4006. __ret = (int32x2_t)(__p0);
  4007. return __ret;
  4008. }
  4009. #else
  4010. __ai int32x2_t vcreate_s32(uint64_t __p0) {
  4011. int32x2_t __ret;
  4012. __ret = (int32x2_t)(__p0);
  4013. return __ret;
  4014. }
  4015. #endif
  4016. #ifdef __LITTLE_ENDIAN__
  4017. __ai int64x1_t vcreate_s64(uint64_t __p0) {
  4018. int64x1_t __ret;
  4019. __ret = (int64x1_t)(__p0);
  4020. return __ret;
  4021. }
  4022. #else
  4023. __ai int64x1_t vcreate_s64(uint64_t __p0) {
  4024. int64x1_t __ret;
  4025. __ret = (int64x1_t)(__p0);
  4026. return __ret;
  4027. }
  4028. #endif
  4029. #ifdef __LITTLE_ENDIAN__
  4030. __ai int16x4_t vcreate_s16(uint64_t __p0) {
  4031. int16x4_t __ret;
  4032. __ret = (int16x4_t)(__p0);
  4033. return __ret;
  4034. }
  4035. #else
  4036. __ai int16x4_t vcreate_s16(uint64_t __p0) {
  4037. int16x4_t __ret;
  4038. __ret = (int16x4_t)(__p0);
  4039. return __ret;
  4040. }
  4041. #endif
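/*
 * vcvt_f32_u32 / vcvt_f32_s32 and the q-register forms: lane-wise conversion
 * of 32-bit (un)signed integers to single-precision float (VCVT on AArch32,
 * UCVTF/SCVTF on AArch64).
 */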
  4042. #ifdef __LITTLE_ENDIAN__
  4043. __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
  4044. float32x4_t __ret;
  4045. __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
  4046. return __ret;
  4047. }
  4048. #else
  4049. __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
  4050. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  4051. float32x4_t __ret;
  4052. __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
  4053. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4054. return __ret;
  4055. }
  4056. #endif
  4057. #ifdef __LITTLE_ENDIAN__
  4058. __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
  4059. float32x4_t __ret;
  4060. __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
  4061. return __ret;
  4062. }
  4063. #else
  4064. __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
  4065. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  4066. float32x4_t __ret;
  4067. __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
  4068. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4069. return __ret;
  4070. }
  4071. #endif
  4072. #ifdef __LITTLE_ENDIAN__
  4073. __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
  4074. float32x2_t __ret;
  4075. __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
  4076. return __ret;
  4077. }
  4078. #else
  4079. __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
  4080. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  4081. float32x2_t __ret;
  4082. __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
  4083. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4084. return __ret;
  4085. }
  4086. #endif
  4087. #ifdef __LITTLE_ENDIAN__
  4088. __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
  4089. float32x2_t __ret;
  4090. __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
  4091. return __ret;
  4092. }
  4093. #else
  4094. __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
  4095. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  4096. float32x2_t __ret;
  4097. __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
  4098. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4099. return __ret;
  4100. }
  4101. #endif
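/*
 * vcvt{q}_n_f32_{u32,s32}: fixed-point to float conversion.  Each integer
 * lane is interpreted as having __p1 fractional bits (1 <= __p1 <= 32), so the
 * result lane is lane / 2^__p1.  __p1 must be an integer constant.
 * Illustrative: vcvtq_n_f32_s32(q, 16) treats each lane of q as having
 * 16 fractional bits.
 */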
  4102. #ifdef __LITTLE_ENDIAN__
  4103. #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
  4104. uint32x4_t __s0 = __p0; \
  4105. float32x4_t __ret; \
  4106. __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
  4107. __ret; \
  4108. })
  4109. #else
  4110. #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
  4111. uint32x4_t __s0 = __p0; \
  4112. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4113. float32x4_t __ret; \
  4114. __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
  4115. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4116. __ret; \
  4117. })
  4118. #endif
  4119. #ifdef __LITTLE_ENDIAN__
  4120. #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
  4121. int32x4_t __s0 = __p0; \
  4122. float32x4_t __ret; \
  4123. __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
  4124. __ret; \
  4125. })
  4126. #else
  4127. #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
  4128. int32x4_t __s0 = __p0; \
  4129. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4130. float32x4_t __ret; \
  4131. __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
  4132. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4133. __ret; \
  4134. })
  4135. #endif
  4136. #ifdef __LITTLE_ENDIAN__
  4137. #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
  4138. uint32x2_t __s0 = __p0; \
  4139. float32x2_t __ret; \
  4140. __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
  4141. __ret; \
  4142. })
  4143. #else
  4144. #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
  4145. uint32x2_t __s0 = __p0; \
  4146. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4147. float32x2_t __ret; \
  4148. __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
  4149. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4150. __ret; \
  4151. })
  4152. #endif
  4153. #ifdef __LITTLE_ENDIAN__
  4154. #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
  4155. int32x2_t __s0 = __p0; \
  4156. float32x2_t __ret; \
  4157. __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
  4158. __ret; \
  4159. })
  4160. #else
  4161. #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
  4162. int32x2_t __s0 = __p0; \
  4163. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4164. float32x2_t __ret; \
  4165. __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
  4166. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4167. __ret; \
  4168. })
  4169. #endif
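/*
 * vcvt{q}_n_s32_f32 / vcvt{q}_n_u32_f32: float to fixed-point conversion.
 * Each lane is multiplied by 2^__p1 and converted to a 32-bit integer,
 * rounding toward zero and saturating on overflow.  __p1 must be an integer
 * constant in the range 1..32.
 */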
  4170. #ifdef __LITTLE_ENDIAN__
  4171. #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
  4172. float32x4_t __s0 = __p0; \
  4173. int32x4_t __ret; \
  4174. __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
  4175. __ret; \
  4176. })
  4177. #else
  4178. #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
  4179. float32x4_t __s0 = __p0; \
  4180. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4181. int32x4_t __ret; \
  4182. __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
  4183. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4184. __ret; \
  4185. })
  4186. #endif
  4187. #ifdef __LITTLE_ENDIAN__
  4188. #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
  4189. float32x2_t __s0 = __p0; \
  4190. int32x2_t __ret; \
  4191. __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
  4192. __ret; \
  4193. })
  4194. #else
  4195. #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
  4196. float32x2_t __s0 = __p0; \
  4197. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4198. int32x2_t __ret; \
  4199. __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
  4200. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4201. __ret; \
  4202. })
  4203. #endif
  4204. #ifdef __LITTLE_ENDIAN__
  4205. #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
  4206. float32x4_t __s0 = __p0; \
  4207. uint32x4_t __ret; \
  4208. __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
  4209. __ret; \
  4210. })
  4211. #else
  4212. #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
  4213. float32x4_t __s0 = __p0; \
  4214. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4215. uint32x4_t __ret; \
  4216. __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
  4217. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4218. __ret; \
  4219. })
  4220. #endif
  4221. #ifdef __LITTLE_ENDIAN__
  4222. #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
  4223. float32x2_t __s0 = __p0; \
  4224. uint32x2_t __ret; \
  4225. __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
  4226. __ret; \
  4227. })
  4228. #else
  4229. #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
  4230. float32x2_t __s0 = __p0; \
  4231. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4232. uint32x2_t __ret; \
  4233. __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
  4234. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4235. __ret; \
  4236. })
  4237. #endif
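/*
 * vcvt{q}_s32_f32 / vcvt{q}_u32_f32: lane-wise float to integer conversion,
 * rounding toward zero and saturating values that are out of range for the
 * destination type.
 */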
  4238. #ifdef __LITTLE_ENDIAN__
  4239. __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
  4240. int32x4_t __ret;
  4241. __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
  4242. return __ret;
  4243. }
  4244. #else
  4245. __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
  4246. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  4247. int32x4_t __ret;
  4248. __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
  4249. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4250. return __ret;
  4251. }
  4252. #endif
  4253. #ifdef __LITTLE_ENDIAN__
  4254. __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
  4255. int32x2_t __ret;
  4256. __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
  4257. return __ret;
  4258. }
  4259. #else
  4260. __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
  4261. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  4262. int32x2_t __ret;
  4263. __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
  4264. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4265. return __ret;
  4266. }
  4267. #endif
  4268. #ifdef __LITTLE_ENDIAN__
  4269. __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
  4270. uint32x4_t __ret;
  4271. __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
  4272. return __ret;
  4273. }
  4274. #else
  4275. __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
  4276. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  4277. uint32x4_t __ret;
  4278. __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
  4279. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4280. return __ret;
  4281. }
  4282. #endif
  4283. #ifdef __LITTLE_ENDIAN__
  4284. __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
  4285. uint32x2_t __ret;
  4286. __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
  4287. return __ret;
  4288. }
  4289. #else
  4290. __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
  4291. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  4292. uint32x2_t __ret;
  4293. __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
  4294. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4295. return __ret;
  4296. }
  4297. #endif
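/*
 * vdup_lane_<t> / vdupq_lane_<t>: broadcast the lane selected by the constant
 * __p1 of a 64-bit source vector into every lane of a 64-bit (vdup_lane) or
 * 128-bit (vdupq_lane) result.  Illustrative:
 *   uint32x4_t r = vdupq_lane_u32(pair, 1);  // replicate element 1 of 'pair'
 */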
  4298. #ifdef __LITTLE_ENDIAN__
  4299. #define vdup_lane_p8(__p0, __p1) __extension__ ({ \
  4300. poly8x8_t __s0 = __p0; \
  4301. poly8x8_t __ret; \
  4302. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4303. __ret; \
  4304. })
  4305. #else
  4306. #define vdup_lane_p8(__p0, __p1) __extension__ ({ \
  4307. poly8x8_t __s0 = __p0; \
  4308. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  4309. poly8x8_t __ret; \
  4310. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4311. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  4312. __ret; \
  4313. })
  4314. #endif
  4315. #ifdef __LITTLE_ENDIAN__
  4316. #define vdup_lane_p16(__p0, __p1) __extension__ ({ \
  4317. poly16x4_t __s0 = __p0; \
  4318. poly16x4_t __ret; \
  4319. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  4320. __ret; \
  4321. })
  4322. #else
  4323. #define vdup_lane_p16(__p0, __p1) __extension__ ({ \
  4324. poly16x4_t __s0 = __p0; \
  4325. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4326. poly16x4_t __ret; \
  4327. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  4328. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4329. __ret; \
  4330. })
  4331. #endif
  4332. #ifdef __LITTLE_ENDIAN__
  4333. #define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
  4334. poly8x8_t __s0 = __p0; \
  4335. poly8x16_t __ret; \
  4336. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4337. __ret; \
  4338. })
  4339. #else
  4340. #define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
  4341. poly8x8_t __s0 = __p0; \
  4342. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  4343. poly8x16_t __ret; \
  4344. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4345. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  4346. __ret; \
  4347. })
  4348. #endif
  4349. #ifdef __LITTLE_ENDIAN__
  4350. #define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
  4351. poly16x4_t __s0 = __p0; \
  4352. poly16x8_t __ret; \
  4353. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4354. __ret; \
  4355. })
  4356. #else
  4357. #define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
  4358. poly16x4_t __s0 = __p0; \
  4359. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4360. poly16x8_t __ret; \
  4361. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4362. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  4363. __ret; \
  4364. })
  4365. #endif
  4366. #ifdef __LITTLE_ENDIAN__
  4367. #define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
  4368. uint8x8_t __s0 = __p0; \
  4369. uint8x16_t __ret; \
  4370. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4371. __ret; \
  4372. })
  4373. #else
  4374. #define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
  4375. uint8x8_t __s0 = __p0; \
  4376. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  4377. uint8x16_t __ret; \
  4378. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4379. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  4380. __ret; \
  4381. })
  4382. #endif
  4383. #ifdef __LITTLE_ENDIAN__
  4384. #define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
  4385. uint32x2_t __s0 = __p0; \
  4386. uint32x4_t __ret; \
  4387. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  4388. __ret; \
  4389. })
  4390. #else
  4391. #define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
  4392. uint32x2_t __s0 = __p0; \
  4393. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4394. uint32x4_t __ret; \
  4395. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  4396. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4397. __ret; \
  4398. })
  4399. #endif
  4400. #ifdef __LITTLE_ENDIAN__
  4401. #define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
  4402. uint64x1_t __s0 = __p0; \
  4403. uint64x2_t __ret; \
  4404. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  4405. __ret; \
  4406. })
  4407. #else
  4408. #define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
  4409. uint64x1_t __s0 = __p0; \
  4410. uint64x2_t __ret; \
  4411. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  4412. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4413. __ret; \
  4414. })
  4415. #endif
  4416. #ifdef __LITTLE_ENDIAN__
  4417. #define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
  4418. uint16x4_t __s0 = __p0; \
  4419. uint16x8_t __ret; \
  4420. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4421. __ret; \
  4422. })
  4423. #else
  4424. #define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
  4425. uint16x4_t __s0 = __p0; \
  4426. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4427. uint16x8_t __ret; \
  4428. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4429. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  4430. __ret; \
  4431. })
  4432. #endif
  4433. #ifdef __LITTLE_ENDIAN__
  4434. #define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
  4435. int8x8_t __s0 = __p0; \
  4436. int8x16_t __ret; \
  4437. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4438. __ret; \
  4439. })
  4440. #else
  4441. #define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
  4442. int8x8_t __s0 = __p0; \
  4443. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  4444. int8x16_t __ret; \
  4445. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4446. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  4447. __ret; \
  4448. })
  4449. #endif
  4450. #ifdef __LITTLE_ENDIAN__
  4451. #define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
  4452. float32x2_t __s0 = __p0; \
  4453. float32x4_t __ret; \
  4454. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  4455. __ret; \
  4456. })
  4457. #else
  4458. #define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
  4459. float32x2_t __s0 = __p0; \
  4460. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4461. float32x4_t __ret; \
  4462. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  4463. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4464. __ret; \
  4465. })
  4466. #endif
  4467. #ifdef __LITTLE_ENDIAN__
  4468. #define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
  4469. int32x2_t __s0 = __p0; \
  4470. int32x4_t __ret; \
  4471. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  4472. __ret; \
  4473. })
  4474. #else
  4475. #define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
  4476. int32x2_t __s0 = __p0; \
  4477. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4478. int32x4_t __ret; \
  4479. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  4480. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4481. __ret; \
  4482. })
  4483. #endif
  4484. #ifdef __LITTLE_ENDIAN__
  4485. #define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
  4486. int64x1_t __s0 = __p0; \
  4487. int64x2_t __ret; \
  4488. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  4489. __ret; \
  4490. })
  4491. #else
  4492. #define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
  4493. int64x1_t __s0 = __p0; \
  4494. int64x2_t __ret; \
  4495. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  4496. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4497. __ret; \
  4498. })
  4499. #endif
  4500. #ifdef __LITTLE_ENDIAN__
  4501. #define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
  4502. int16x4_t __s0 = __p0; \
  4503. int16x8_t __ret; \
  4504. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4505. __ret; \
  4506. })
  4507. #else
  4508. #define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
  4509. int16x4_t __s0 = __p0; \
  4510. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4511. int16x8_t __ret; \
  4512. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4513. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  4514. __ret; \
  4515. })
  4516. #endif
  4517. #ifdef __LITTLE_ENDIAN__
  4518. #define vdup_lane_u8(__p0, __p1) __extension__ ({ \
  4519. uint8x8_t __s0 = __p0; \
  4520. uint8x8_t __ret; \
  4521. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4522. __ret; \
  4523. })
  4524. #else
  4525. #define vdup_lane_u8(__p0, __p1) __extension__ ({ \
  4526. uint8x8_t __s0 = __p0; \
  4527. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  4528. uint8x8_t __ret; \
  4529. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4530. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  4531. __ret; \
  4532. })
  4533. #endif
  4534. #ifdef __LITTLE_ENDIAN__
  4535. #define vdup_lane_u32(__p0, __p1) __extension__ ({ \
  4536. uint32x2_t __s0 = __p0; \
  4537. uint32x2_t __ret; \
  4538. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  4539. __ret; \
  4540. })
  4541. #else
  4542. #define vdup_lane_u32(__p0, __p1) __extension__ ({ \
  4543. uint32x2_t __s0 = __p0; \
  4544. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4545. uint32x2_t __ret; \
  4546. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  4547. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4548. __ret; \
  4549. })
  4550. #endif
  4551. #ifdef __LITTLE_ENDIAN__
  4552. #define vdup_lane_u64(__p0, __p1) __extension__ ({ \
  4553. uint64x1_t __s0 = __p0; \
  4554. uint64x1_t __ret; \
  4555. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  4556. __ret; \
  4557. })
  4558. #else
  4559. #define vdup_lane_u64(__p0, __p1) __extension__ ({ \
  4560. uint64x1_t __s0 = __p0; \
  4561. uint64x1_t __ret; \
  4562. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  4563. __ret; \
  4564. })
  4565. #endif
  4566. #ifdef __LITTLE_ENDIAN__
  4567. #define vdup_lane_u16(__p0, __p1) __extension__ ({ \
  4568. uint16x4_t __s0 = __p0; \
  4569. uint16x4_t __ret; \
  4570. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  4571. __ret; \
  4572. })
  4573. #else
  4574. #define vdup_lane_u16(__p0, __p1) __extension__ ({ \
  4575. uint16x4_t __s0 = __p0; \
  4576. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4577. uint16x4_t __ret; \
  4578. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  4579. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4580. __ret; \
  4581. })
  4582. #endif
  4583. #ifdef __LITTLE_ENDIAN__
  4584. #define vdup_lane_s8(__p0, __p1) __extension__ ({ \
  4585. int8x8_t __s0 = __p0; \
  4586. int8x8_t __ret; \
  4587. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4588. __ret; \
  4589. })
  4590. #else
  4591. #define vdup_lane_s8(__p0, __p1) __extension__ ({ \
  4592. int8x8_t __s0 = __p0; \
  4593. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  4594. int8x8_t __ret; \
  4595. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  4596. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  4597. __ret; \
  4598. })
  4599. #endif
  4600. #ifdef __LITTLE_ENDIAN__
  4601. #define vdup_lane_f32(__p0, __p1) __extension__ ({ \
  4602. float32x2_t __s0 = __p0; \
  4603. float32x2_t __ret; \
  4604. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  4605. __ret; \
  4606. })
  4607. #else
  4608. #define vdup_lane_f32(__p0, __p1) __extension__ ({ \
  4609. float32x2_t __s0 = __p0; \
  4610. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4611. float32x2_t __ret; \
  4612. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  4613. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4614. __ret; \
  4615. })
  4616. #endif
  4617. #ifdef __LITTLE_ENDIAN__
  4618. #define vdup_lane_s32(__p0, __p1) __extension__ ({ \
  4619. int32x2_t __s0 = __p0; \
  4620. int32x2_t __ret; \
  4621. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  4622. __ret; \
  4623. })
  4624. #else
  4625. #define vdup_lane_s32(__p0, __p1) __extension__ ({ \
  4626. int32x2_t __s0 = __p0; \
  4627. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  4628. int32x2_t __ret; \
  4629. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  4630. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  4631. __ret; \
  4632. })
  4633. #endif
  4634. #ifdef __LITTLE_ENDIAN__
  4635. #define vdup_lane_s64(__p0, __p1) __extension__ ({ \
  4636. int64x1_t __s0 = __p0; \
  4637. int64x1_t __ret; \
  4638. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  4639. __ret; \
  4640. })
  4641. #else
  4642. #define vdup_lane_s64(__p0, __p1) __extension__ ({ \
  4643. int64x1_t __s0 = __p0; \
  4644. int64x1_t __ret; \
  4645. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  4646. __ret; \
  4647. })
  4648. #endif
  4649. #ifdef __LITTLE_ENDIAN__
  4650. #define vdup_lane_s16(__p0, __p1) __extension__ ({ \
  4651. int16x4_t __s0 = __p0; \
  4652. int16x4_t __ret; \
  4653. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  4654. __ret; \
  4655. })
  4656. #else
  4657. #define vdup_lane_s16(__p0, __p1) __extension__ ({ \
  4658. int16x4_t __s0 = __p0; \
  4659. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  4660. int16x4_t __ret; \
  4661. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  4662. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4663. __ret; \
  4664. })
  4665. #endif
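/*
 * vdup_n_<t> / vdupq_n_<t>: splat a scalar into every lane of a 64-bit or
 * 128-bit vector (equivalent to vmov_n_<t> / vmovq_n_<t>).
 */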
  4666. #ifdef __LITTLE_ENDIAN__
  4667. __ai poly8x8_t vdup_n_p8(poly8_t __p0) {
  4668. poly8x8_t __ret;
  4669. __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4670. return __ret;
  4671. }
  4672. #else
  4673. __ai poly8x8_t vdup_n_p8(poly8_t __p0) {
  4674. poly8x8_t __ret;
  4675. __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4676. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  4677. return __ret;
  4678. }
  4679. #endif
  4680. #ifdef __LITTLE_ENDIAN__
  4681. __ai poly16x4_t vdup_n_p16(poly16_t __p0) {
  4682. poly16x4_t __ret;
  4683. __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
  4684. return __ret;
  4685. }
  4686. #else
  4687. __ai poly16x4_t vdup_n_p16(poly16_t __p0) {
  4688. poly16x4_t __ret;
  4689. __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
  4690. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4691. return __ret;
  4692. }
  4693. #endif
  4694. #ifdef __LITTLE_ENDIAN__
  4695. __ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
  4696. poly8x16_t __ret;
  4697. __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4698. return __ret;
  4699. }
  4700. #else
  4701. __ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
  4702. poly8x16_t __ret;
  4703. __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4704. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  4705. return __ret;
  4706. }
  4707. #endif
  4708. #ifdef __LITTLE_ENDIAN__
  4709. __ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
  4710. poly16x8_t __ret;
  4711. __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4712. return __ret;
  4713. }
  4714. #else
  4715. __ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
  4716. poly16x8_t __ret;
  4717. __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4718. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  4719. return __ret;
  4720. }
  4721. #endif
  4722. #ifdef __LITTLE_ENDIAN__
  4723. __ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
  4724. uint8x16_t __ret;
  4725. __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4726. return __ret;
  4727. }
  4728. #else
  4729. __ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
  4730. uint8x16_t __ret;
  4731. __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4732. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  4733. return __ret;
  4734. }
  4735. #endif
  4736. #ifdef __LITTLE_ENDIAN__
  4737. __ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
  4738. uint32x4_t __ret;
  4739. __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
  4740. return __ret;
  4741. }
  4742. #else
  4743. __ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
  4744. uint32x4_t __ret;
  4745. __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
  4746. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4747. return __ret;
  4748. }
  4749. #endif
  4750. #ifdef __LITTLE_ENDIAN__
  4751. __ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
  4752. uint64x2_t __ret;
  4753. __ret = (uint64x2_t) {__p0, __p0};
  4754. return __ret;
  4755. }
  4756. #else
  4757. __ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
  4758. uint64x2_t __ret;
  4759. __ret = (uint64x2_t) {__p0, __p0};
  4760. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4761. return __ret;
  4762. }
  4763. #endif
  4764. #ifdef __LITTLE_ENDIAN__
  4765. __ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
  4766. uint16x8_t __ret;
  4767. __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4768. return __ret;
  4769. }
  4770. #else
  4771. __ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
  4772. uint16x8_t __ret;
  4773. __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4774. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  4775. return __ret;
  4776. }
  4777. #endif
  4778. #ifdef __LITTLE_ENDIAN__
  4779. __ai int8x16_t vdupq_n_s8(int8_t __p0) {
  4780. int8x16_t __ret;
  4781. __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4782. return __ret;
  4783. }
  4784. #else
  4785. __ai int8x16_t vdupq_n_s8(int8_t __p0) {
  4786. int8x16_t __ret;
  4787. __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4788. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  4789. return __ret;
  4790. }
  4791. #endif
  4792. #ifdef __LITTLE_ENDIAN__
  4793. __ai float32x4_t vdupq_n_f32(float32_t __p0) {
  4794. float32x4_t __ret;
  4795. __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
  4796. return __ret;
  4797. }
  4798. #else
  4799. __ai float32x4_t vdupq_n_f32(float32_t __p0) {
  4800. float32x4_t __ret;
  4801. __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
  4802. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4803. return __ret;
  4804. }
  4805. #endif
  4806. #ifdef __LITTLE_ENDIAN__
  4807. #define vdupq_n_f16(__p0) __extension__ ({ \
  4808. float16_t __s0 = __p0; \
  4809. float16x8_t __ret; \
  4810. __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
  4811. __ret; \
  4812. })
  4813. #else
  4814. #define vdupq_n_f16(__p0) __extension__ ({ \
  4815. float16_t __s0 = __p0; \
  4816. float16x8_t __ret; \
  4817. __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
  4818. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  4819. __ret; \
  4820. })
  4821. #endif
  4822. #ifdef __LITTLE_ENDIAN__
  4823. __ai int32x4_t vdupq_n_s32(int32_t __p0) {
  4824. int32x4_t __ret;
  4825. __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
  4826. return __ret;
  4827. }
  4828. #else
  4829. __ai int32x4_t vdupq_n_s32(int32_t __p0) {
  4830. int32x4_t __ret;
  4831. __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
  4832. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4833. return __ret;
  4834. }
  4835. #endif
  4836. #ifdef __LITTLE_ENDIAN__
  4837. __ai int64x2_t vdupq_n_s64(int64_t __p0) {
  4838. int64x2_t __ret;
  4839. __ret = (int64x2_t) {__p0, __p0};
  4840. return __ret;
  4841. }
  4842. #else
  4843. __ai int64x2_t vdupq_n_s64(int64_t __p0) {
  4844. int64x2_t __ret;
  4845. __ret = (int64x2_t) {__p0, __p0};
  4846. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4847. return __ret;
  4848. }
  4849. #endif
  4850. #ifdef __LITTLE_ENDIAN__
  4851. __ai int16x8_t vdupq_n_s16(int16_t __p0) {
  4852. int16x8_t __ret;
  4853. __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4854. return __ret;
  4855. }
  4856. #else
  4857. __ai int16x8_t vdupq_n_s16(int16_t __p0) {
  4858. int16x8_t __ret;
  4859. __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4860. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  4861. return __ret;
  4862. }
  4863. #endif
  4864. #ifdef __LITTLE_ENDIAN__
  4865. __ai uint8x8_t vdup_n_u8(uint8_t __p0) {
  4866. uint8x8_t __ret;
  4867. __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4868. return __ret;
  4869. }
  4870. #else
  4871. __ai uint8x8_t vdup_n_u8(uint8_t __p0) {
  4872. uint8x8_t __ret;
  4873. __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4874. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  4875. return __ret;
  4876. }
  4877. #endif
  4878. #ifdef __LITTLE_ENDIAN__
  4879. __ai uint32x2_t vdup_n_u32(uint32_t __p0) {
  4880. uint32x2_t __ret;
  4881. __ret = (uint32x2_t) {__p0, __p0};
  4882. return __ret;
  4883. }
  4884. #else
  4885. __ai uint32x2_t vdup_n_u32(uint32_t __p0) {
  4886. uint32x2_t __ret;
  4887. __ret = (uint32x2_t) {__p0, __p0};
  4888. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4889. return __ret;
  4890. }
  4891. #endif
  4892. #ifdef __LITTLE_ENDIAN__
  4893. __ai uint64x1_t vdup_n_u64(uint64_t __p0) {
  4894. uint64x1_t __ret;
  4895. __ret = (uint64x1_t) {__p0};
  4896. return __ret;
  4897. }
  4898. #else
  4899. __ai uint64x1_t vdup_n_u64(uint64_t __p0) {
  4900. uint64x1_t __ret;
  4901. __ret = (uint64x1_t) {__p0};
  4902. return __ret;
  4903. }
  4904. #endif
  4905. #ifdef __LITTLE_ENDIAN__
  4906. __ai uint16x4_t vdup_n_u16(uint16_t __p0) {
  4907. uint16x4_t __ret;
  4908. __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
  4909. return __ret;
  4910. }
  4911. #else
  4912. __ai uint16x4_t vdup_n_u16(uint16_t __p0) {
  4913. uint16x4_t __ret;
  4914. __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
  4915. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  4916. return __ret;
  4917. }
  4918. #endif
  4919. #ifdef __LITTLE_ENDIAN__
  4920. __ai int8x8_t vdup_n_s8(int8_t __p0) {
  4921. int8x8_t __ret;
  4922. __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4923. return __ret;
  4924. }
  4925. #else
  4926. __ai int8x8_t vdup_n_s8(int8_t __p0) {
  4927. int8x8_t __ret;
  4928. __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  4929. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  4930. return __ret;
  4931. }
  4932. #endif
  4933. #ifdef __LITTLE_ENDIAN__
  4934. __ai float32x2_t vdup_n_f32(float32_t __p0) {
  4935. float32x2_t __ret;
  4936. __ret = (float32x2_t) {__p0, __p0};
  4937. return __ret;
  4938. }
  4939. #else
  4940. __ai float32x2_t vdup_n_f32(float32_t __p0) {
  4941. float32x2_t __ret;
  4942. __ret = (float32x2_t) {__p0, __p0};
  4943. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4944. return __ret;
  4945. }
  4946. #endif
  4947. #ifdef __LITTLE_ENDIAN__
  4948. #define vdup_n_f16(__p0) __extension__ ({ \
  4949. float16_t __s0 = __p0; \
  4950. float16x4_t __ret; \
  4951. __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
  4952. __ret; \
  4953. })
  4954. #else
  4955. #define vdup_n_f16(__p0) __extension__ ({ \
  4956. float16_t __s0 = __p0; \
  4957. float16x4_t __ret; \
  4958. __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
  4959. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  4960. __ret; \
  4961. })
  4962. #endif
  4963. #ifdef __LITTLE_ENDIAN__
  4964. __ai int32x2_t vdup_n_s32(int32_t __p0) {
  4965. int32x2_t __ret;
  4966. __ret = (int32x2_t) {__p0, __p0};
  4967. return __ret;
  4968. }
  4969. #else
  4970. __ai int32x2_t vdup_n_s32(int32_t __p0) {
  4971. int32x2_t __ret;
  4972. __ret = (int32x2_t) {__p0, __p0};
  4973. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  4974. return __ret;
  4975. }
  4976. #endif
  4977. #ifdef __LITTLE_ENDIAN__
  4978. __ai int64x1_t vdup_n_s64(int64_t __p0) {
  4979. int64x1_t __ret;
  4980. __ret = (int64x1_t) {__p0};
  4981. return __ret;
  4982. }
  4983. #else
  4984. __ai int64x1_t vdup_n_s64(int64_t __p0) {
  4985. int64x1_t __ret;
  4986. __ret = (int64x1_t) {__p0};
  4987. return __ret;
  4988. }
  4989. #endif
  4990. #ifdef __LITTLE_ENDIAN__
  4991. __ai int16x4_t vdup_n_s16(int16_t __p0) {
  4992. int16x4_t __ret;
  4993. __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
  4994. return __ret;
  4995. }
  4996. #else
  4997. __ai int16x4_t vdup_n_s16(int16_t __p0) {
  4998. int16x4_t __ret;
  4999. __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
  5000. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5001. return __ret;
  5002. }
  5003. #endif
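/*
 * veor_<t> / veorq_<t>: lane-wise bitwise exclusive OR, expressed directly
 * with the ^ operator on the vector types; the element width does not affect
 * the result.
 */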
  5004. #ifdef __LITTLE_ENDIAN__
  5005. __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  5006. uint8x16_t __ret;
  5007. __ret = __p0 ^ __p1;
  5008. return __ret;
  5009. }
  5010. #else
  5011. __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  5012. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5013. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5014. uint8x16_t __ret;
  5015. __ret = __rev0 ^ __rev1;
  5016. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5017. return __ret;
  5018. }
  5019. #endif
  5020. #ifdef __LITTLE_ENDIAN__
  5021. __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  5022. uint32x4_t __ret;
  5023. __ret = __p0 ^ __p1;
  5024. return __ret;
  5025. }
  5026. #else
  5027. __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  5028. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  5029. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  5030. uint32x4_t __ret;
  5031. __ret = __rev0 ^ __rev1;
  5032. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5033. return __ret;
  5034. }
  5035. #endif
  5036. #ifdef __LITTLE_ENDIAN__
  5037. __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  5038. uint64x2_t __ret;
  5039. __ret = __p0 ^ __p1;
  5040. return __ret;
  5041. }
  5042. #else
  5043. __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  5044. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  5045. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  5046. uint64x2_t __ret;
  5047. __ret = __rev0 ^ __rev1;
  5048. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  5049. return __ret;
  5050. }
  5051. #endif
  5052. #ifdef __LITTLE_ENDIAN__
  5053. __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  5054. uint16x8_t __ret;
  5055. __ret = __p0 ^ __p1;
  5056. return __ret;
  5057. }
  5058. #else
  5059. __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  5060. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  5061. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  5062. uint16x8_t __ret;
  5063. __ret = __rev0 ^ __rev1;
  5064. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  5065. return __ret;
  5066. }
  5067. #endif
  5068. #ifdef __LITTLE_ENDIAN__
  5069. __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
  5070. int8x16_t __ret;
  5071. __ret = __p0 ^ __p1;
  5072. return __ret;
  5073. }
  5074. #else
  5075. __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
  5076. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5077. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5078. int8x16_t __ret;
  5079. __ret = __rev0 ^ __rev1;
  5080. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5081. return __ret;
  5082. }
  5083. #endif
  5084. #ifdef __LITTLE_ENDIAN__
  5085. __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
  5086. int32x4_t __ret;
  5087. __ret = __p0 ^ __p1;
  5088. return __ret;
  5089. }
  5090. #else
  5091. __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
  5092. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  5093. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  5094. int32x4_t __ret;
  5095. __ret = __rev0 ^ __rev1;
  5096. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5097. return __ret;
  5098. }
  5099. #endif
  5100. #ifdef __LITTLE_ENDIAN__
  5101. __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
  5102. int64x2_t __ret;
  5103. __ret = __p0 ^ __p1;
  5104. return __ret;
  5105. }
  5106. #else
  5107. __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
  5108. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  5109. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  5110. int64x2_t __ret;
  5111. __ret = __rev0 ^ __rev1;
  5112. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  5113. return __ret;
  5114. }
  5115. #endif
  5116. #ifdef __LITTLE_ENDIAN__
  5117. __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
  5118. int16x8_t __ret;
  5119. __ret = __p0 ^ __p1;
  5120. return __ret;
  5121. }
  5122. #else
  5123. __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
  5124. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  5125. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  5126. int16x8_t __ret;
  5127. __ret = __rev0 ^ __rev1;
  5128. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  5129. return __ret;
  5130. }
  5131. #endif
  5132. #ifdef __LITTLE_ENDIAN__
  5133. __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
  5134. uint8x8_t __ret;
  5135. __ret = __p0 ^ __p1;
  5136. return __ret;
  5137. }
  5138. #else
  5139. __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
  5140. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  5141. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  5142. uint8x8_t __ret;
  5143. __ret = __rev0 ^ __rev1;
  5144. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  5145. return __ret;
  5146. }
  5147. #endif
  5148. #ifdef __LITTLE_ENDIAN__
  5149. __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
  5150. uint32x2_t __ret;
  5151. __ret = __p0 ^ __p1;
  5152. return __ret;
  5153. }
  5154. #else
  5155. __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
  5156. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  5157. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  5158. uint32x2_t __ret;
  5159. __ret = __rev0 ^ __rev1;
  5160. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  5161. return __ret;
  5162. }
  5163. #endif
  5164. #ifdef __LITTLE_ENDIAN__
  5165. __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
  5166. uint64x1_t __ret;
  5167. __ret = __p0 ^ __p1;
  5168. return __ret;
  5169. }
  5170. #else
  5171. __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
  5172. uint64x1_t __ret;
  5173. __ret = __p0 ^ __p1;
  5174. return __ret;
  5175. }
  5176. #endif
  5177. #ifdef __LITTLE_ENDIAN__
  5178. __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
  5179. uint16x4_t __ret;
  5180. __ret = __p0 ^ __p1;
  5181. return __ret;
  5182. }
  5183. #else
  5184. __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
  5185. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  5186. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  5187. uint16x4_t __ret;
  5188. __ret = __rev0 ^ __rev1;
  5189. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5190. return __ret;
  5191. }
  5192. #endif
  5193. #ifdef __LITTLE_ENDIAN__
  5194. __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
  5195. int8x8_t __ret;
  5196. __ret = __p0 ^ __p1;
  5197. return __ret;
  5198. }
  5199. #else
  5200. __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
  5201. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  5202. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  5203. int8x8_t __ret;
  5204. __ret = __rev0 ^ __rev1;
  5205. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  5206. return __ret;
  5207. }
  5208. #endif
  5209. #ifdef __LITTLE_ENDIAN__
  5210. __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
  5211. int32x2_t __ret;
  5212. __ret = __p0 ^ __p1;
  5213. return __ret;
  5214. }
  5215. #else
  5216. __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
  5217. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  5218. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  5219. int32x2_t __ret;
  5220. __ret = __rev0 ^ __rev1;
  5221. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  5222. return __ret;
  5223. }
  5224. #endif
  5225. #ifdef __LITTLE_ENDIAN__
  5226. __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
  5227. int64x1_t __ret;
  5228. __ret = __p0 ^ __p1;
  5229. return __ret;
  5230. }
  5231. #else
  5232. __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
  5233. int64x1_t __ret;
  5234. __ret = __p0 ^ __p1;
  5235. return __ret;
  5236. }
  5237. #endif
  5238. #ifdef __LITTLE_ENDIAN__
  5239. __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
  5240. int16x4_t __ret;
  5241. __ret = __p0 ^ __p1;
  5242. return __ret;
  5243. }
  5244. #else
  5245. __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
  5246. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  5247. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  5248. int16x4_t __ret;
  5249. __ret = __rev0 ^ __rev1;
  5250. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5251. return __ret;
  5252. }
  5253. #endif
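/*
 * vext_<t>(a, b, n) / vextq_<t>(a, b, n): treat a and b as one concatenated
 * vector, with a supplying the lowest lanes, and extract a full-width window
 * starting at lane n of a.  n must be a constant less than the lane count.
 * Illustrative: vext_u8(a, b, 3) yields {a[3..7], b[0..2]}.
 */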
  5254. #ifdef __LITTLE_ENDIAN__
  5255. #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
  5256. poly8x8_t __s0 = __p0; \
  5257. poly8x8_t __s1 = __p1; \
  5258. poly8x8_t __ret; \
  5259. __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
  5260. __ret; \
  5261. })
  5262. #else
  5263. #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
  5264. poly8x8_t __s0 = __p0; \
  5265. poly8x8_t __s1 = __p1; \
  5266. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  5267. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  5268. poly8x8_t __ret; \
  5269. __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
  5270. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  5271. __ret; \
  5272. })
  5273. #endif
  5274. #ifdef __LITTLE_ENDIAN__
  5275. #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
  5276. poly16x4_t __s0 = __p0; \
  5277. poly16x4_t __s1 = __p1; \
  5278. poly16x4_t __ret; \
  5279. __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
  5280. __ret; \
  5281. })
  5282. #else
  5283. #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
  5284. poly16x4_t __s0 = __p0; \
  5285. poly16x4_t __s1 = __p1; \
  5286. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  5287. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  5288. poly16x4_t __ret; \
  5289. __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
  5290. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  5291. __ret; \
  5292. })
  5293. #endif
  5294. #ifdef __LITTLE_ENDIAN__
  5295. #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
  5296. poly8x16_t __s0 = __p0; \
  5297. poly8x16_t __s1 = __p1; \
  5298. poly8x16_t __ret; \
  5299. __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
  5300. __ret; \
  5301. })
  5302. #else
  5303. #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
  5304. poly8x16_t __s0 = __p0; \
  5305. poly8x16_t __s1 = __p1; \
  5306. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5307. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5308. poly8x16_t __ret; \
  5309. __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
  5310. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5311. __ret; \
  5312. })
  5313. #endif
  5314. #ifdef __LITTLE_ENDIAN__
  5315. #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
  5316. poly16x8_t __s0 = __p0; \
  5317. poly16x8_t __s1 = __p1; \
  5318. poly16x8_t __ret; \
  5319. __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
  5320. __ret; \
  5321. })
  5322. #else
  5323. #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
  5324. poly16x8_t __s0 = __p0; \
  5325. poly16x8_t __s1 = __p1; \
  5326. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  5327. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  5328. poly16x8_t __ret; \
  5329. __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
  5330. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  5331. __ret; \
  5332. })
  5333. #endif
  5334. #ifdef __LITTLE_ENDIAN__
  5335. #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
  5336. uint8x16_t __s0 = __p0; \
  5337. uint8x16_t __s1 = __p1; \
  5338. uint8x16_t __ret; \
  5339. __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
  5340. __ret; \
  5341. })
  5342. #else
  5343. #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
  5344. uint8x16_t __s0 = __p0; \
  5345. uint8x16_t __s1 = __p1; \
  5346. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5347. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5348. uint8x16_t __ret; \
  5349. __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
  5350. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5351. __ret; \
  5352. })
  5353. #endif
  5354. #ifdef __LITTLE_ENDIAN__
  5355. #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
  5356. uint32x4_t __s0 = __p0; \
  5357. uint32x4_t __s1 = __p1; \
  5358. uint32x4_t __ret; \
  5359. __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
  5360. __ret; \
  5361. })
  5362. #else
  5363. #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
  5364. uint32x4_t __s0 = __p0; \
  5365. uint32x4_t __s1 = __p1; \
  5366. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  5367. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  5368. uint32x4_t __ret; \
  5369. __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
  5370. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  5371. __ret; \
  5372. })
  5373. #endif
  5374. #ifdef __LITTLE_ENDIAN__
  5375. #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
  5376. uint64x2_t __s0 = __p0; \
  5377. uint64x2_t __s1 = __p1; \
  5378. uint64x2_t __ret; \
  5379. __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
  5380. __ret; \
  5381. })
  5382. #else
  5383. #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
  5384. uint64x2_t __s0 = __p0; \
  5385. uint64x2_t __s1 = __p1; \
  5386. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  5387. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  5388. uint64x2_t __ret; \
  5389. __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
  5390. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  5391. __ret; \
  5392. })
  5393. #endif
  5394. #ifdef __LITTLE_ENDIAN__
  5395. #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
  5396. uint16x8_t __s0 = __p0; \
  5397. uint16x8_t __s1 = __p1; \
  5398. uint16x8_t __ret; \
  5399. __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
  5400. __ret; \
  5401. })
  5402. #else
  5403. #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
  5404. uint16x8_t __s0 = __p0; \
  5405. uint16x8_t __s1 = __p1; \
  5406. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  5407. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  5408. uint16x8_t __ret; \
  5409. __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
  5410. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  5411. __ret; \
  5412. })
  5413. #endif
  5414. #ifdef __LITTLE_ENDIAN__
  5415. #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
  5416. int8x16_t __s0 = __p0; \
  5417. int8x16_t __s1 = __p1; \
  5418. int8x16_t __ret; \
  5419. __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
  5420. __ret; \
  5421. })
  5422. #else
  5423. #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
  5424. int8x16_t __s0 = __p0; \
  5425. int8x16_t __s1 = __p1; \
  5426. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5427. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5428. int8x16_t __ret; \
  5429. __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
  5430. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5431. __ret; \
  5432. })
  5433. #endif
  5434. #ifdef __LITTLE_ENDIAN__
  5435. #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
  5436. float32x4_t __s0 = __p0; \
  5437. float32x4_t __s1 = __p1; \
  5438. float32x4_t __ret; \
  5439. __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
  5440. __ret; \
  5441. })
  5442. #else
  5443. #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
  5444. float32x4_t __s0 = __p0; \
  5445. float32x4_t __s1 = __p1; \
  5446. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  5447. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  5448. float32x4_t __ret; \
  5449. __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
  5450. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  5451. __ret; \
  5452. })
  5453. #endif
  5454. #ifdef __LITTLE_ENDIAN__
  5455. #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
  5456. int32x4_t __s0 = __p0; \
  5457. int32x4_t __s1 = __p1; \
  5458. int32x4_t __ret; \
  5459. __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
  5460. __ret; \
  5461. })
  5462. #else
  5463. #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
  5464. int32x4_t __s0 = __p0; \
  5465. int32x4_t __s1 = __p1; \
  5466. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  5467. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  5468. int32x4_t __ret; \
  5469. __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
  5470. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  5471. __ret; \
  5472. })
  5473. #endif
  5474. #ifdef __LITTLE_ENDIAN__
  5475. #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
  5476. int64x2_t __s0 = __p0; \
  5477. int64x2_t __s1 = __p1; \
  5478. int64x2_t __ret; \
  5479. __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
  5480. __ret; \
  5481. })
  5482. #else
  5483. #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
  5484. int64x2_t __s0 = __p0; \
  5485. int64x2_t __s1 = __p1; \
  5486. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  5487. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  5488. int64x2_t __ret; \
  5489. __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
  5490. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  5491. __ret; \
  5492. })
  5493. #endif
  5494. #ifdef __LITTLE_ENDIAN__
  5495. #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
  5496. int16x8_t __s0 = __p0; \
  5497. int16x8_t __s1 = __p1; \
  5498. int16x8_t __ret; \
  5499. __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
  5500. __ret; \
  5501. })
  5502. #else
  5503. #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
  5504. int16x8_t __s0 = __p0; \
  5505. int16x8_t __s1 = __p1; \
  5506. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  5507. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  5508. int16x8_t __ret; \
  5509. __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
  5510. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  5511. __ret; \
  5512. })
  5513. #endif
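/* Editorial note: from here the vext_* macros are the 64-bit (d-register)
 * forms of the same operation.  For the single-lane types (uint64x1_t and
 * int64x1_t) the big-endian definition is textually identical to the
 * little-endian one, since a one-lane vector has no lane order to reverse.
 *
 * Illustrative use, assuming <arm_neon.h> on a NEON-enabled target
 * (demo_sliding_window is a hypothetical name):
 *
 *   uint8x8_t demo_sliding_window(uint8x8_t lo, uint8x8_t hi) {
 *       return vext_u8(lo, hi, 3);   // bytes 3..7 of lo, then 0..2 of hi
 *   }
 */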
  5514. #ifdef __LITTLE_ENDIAN__
  5515. #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
  5516. uint8x8_t __s0 = __p0; \
  5517. uint8x8_t __s1 = __p1; \
  5518. uint8x8_t __ret; \
  5519. __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
  5520. __ret; \
  5521. })
  5522. #else
  5523. #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
  5524. uint8x8_t __s0 = __p0; \
  5525. uint8x8_t __s1 = __p1; \
  5526. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  5527. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  5528. uint8x8_t __ret; \
  5529. __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
  5530. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  5531. __ret; \
  5532. })
  5533. #endif
  5534. #ifdef __LITTLE_ENDIAN__
  5535. #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
  5536. uint32x2_t __s0 = __p0; \
  5537. uint32x2_t __s1 = __p1; \
  5538. uint32x2_t __ret; \
  5539. __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
  5540. __ret; \
  5541. })
  5542. #else
  5543. #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
  5544. uint32x2_t __s0 = __p0; \
  5545. uint32x2_t __s1 = __p1; \
  5546. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  5547. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  5548. uint32x2_t __ret; \
  5549. __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
  5550. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  5551. __ret; \
  5552. })
  5553. #endif
  5554. #ifdef __LITTLE_ENDIAN__
  5555. #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
  5556. uint64x1_t __s0 = __p0; \
  5557. uint64x1_t __s1 = __p1; \
  5558. uint64x1_t __ret; \
  5559. __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  5560. __ret; \
  5561. })
  5562. #else
  5563. #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
  5564. uint64x1_t __s0 = __p0; \
  5565. uint64x1_t __s1 = __p1; \
  5566. uint64x1_t __ret; \
  5567. __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  5568. __ret; \
  5569. })
  5570. #endif
  5571. #ifdef __LITTLE_ENDIAN__
  5572. #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
  5573. uint16x4_t __s0 = __p0; \
  5574. uint16x4_t __s1 = __p1; \
  5575. uint16x4_t __ret; \
  5576. __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
  5577. __ret; \
  5578. })
  5579. #else
  5580. #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
  5581. uint16x4_t __s0 = __p0; \
  5582. uint16x4_t __s1 = __p1; \
  5583. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  5584. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  5585. uint16x4_t __ret; \
  5586. __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
  5587. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  5588. __ret; \
  5589. })
  5590. #endif
  5591. #ifdef __LITTLE_ENDIAN__
  5592. #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
  5593. int8x8_t __s0 = __p0; \
  5594. int8x8_t __s1 = __p1; \
  5595. int8x8_t __ret; \
  5596. __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
  5597. __ret; \
  5598. })
  5599. #else
  5600. #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
  5601. int8x8_t __s0 = __p0; \
  5602. int8x8_t __s1 = __p1; \
  5603. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  5604. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  5605. int8x8_t __ret; \
  5606. __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
  5607. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  5608. __ret; \
  5609. })
  5610. #endif
  5611. #ifdef __LITTLE_ENDIAN__
  5612. #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
  5613. float32x2_t __s0 = __p0; \
  5614. float32x2_t __s1 = __p1; \
  5615. float32x2_t __ret; \
  5616. __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
  5617. __ret; \
  5618. })
  5619. #else
  5620. #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
  5621. float32x2_t __s0 = __p0; \
  5622. float32x2_t __s1 = __p1; \
  5623. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  5624. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  5625. float32x2_t __ret; \
  5626. __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
  5627. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  5628. __ret; \
  5629. })
  5630. #endif
  5631. #ifdef __LITTLE_ENDIAN__
  5632. #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
  5633. int32x2_t __s0 = __p0; \
  5634. int32x2_t __s1 = __p1; \
  5635. int32x2_t __ret; \
  5636. __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
  5637. __ret; \
  5638. })
  5639. #else
  5640. #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
  5641. int32x2_t __s0 = __p0; \
  5642. int32x2_t __s1 = __p1; \
  5643. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  5644. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  5645. int32x2_t __ret; \
  5646. __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
  5647. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  5648. __ret; \
  5649. })
  5650. #endif
  5651. #ifdef __LITTLE_ENDIAN__
  5652. #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
  5653. int64x1_t __s0 = __p0; \
  5654. int64x1_t __s1 = __p1; \
  5655. int64x1_t __ret; \
  5656. __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  5657. __ret; \
  5658. })
  5659. #else
  5660. #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
  5661. int64x1_t __s0 = __p0; \
  5662. int64x1_t __s1 = __p1; \
  5663. int64x1_t __ret; \
  5664. __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  5665. __ret; \
  5666. })
  5667. #endif
  5668. #ifdef __LITTLE_ENDIAN__
  5669. #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
  5670. int16x4_t __s0 = __p0; \
  5671. int16x4_t __s1 = __p1; \
  5672. int16x4_t __ret; \
  5673. __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
  5674. __ret; \
  5675. })
  5676. #else
  5677. #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
  5678. int16x4_t __s0 = __p0; \
  5679. int16x4_t __s1 = __p1; \
  5680. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  5681. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  5682. int16x4_t __ret; \
  5683. __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
  5684. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  5685. __ret; \
  5686. })
  5687. #endif
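/* Editorial note: the vget_high_* functions below return the upper half of a
 * 128-bit vector as a 64-bit vector (e.g. lanes 8..15 of a uint8x16_t, lanes
 * 2..3 of a uint32x4_t).  They are built entirely from
 * __builtin_shufflevector; the __noswap_ variants are internal helpers used
 * by other intrinsics in this header when the lanes have already been
 * reversed for big-endian handling.
 *
 * Illustrative use, assuming <arm_neon.h> on a NEON-enabled target
 * (demo_upper_half is a hypothetical name):
 *
 *   uint16x4_t demo_upper_half(uint16x8_t v) {
 *       return vget_high_u16(v);   // lanes 4..7 of v
 *   }
 */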
  5688. #ifdef __LITTLE_ENDIAN__
  5689. __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
  5690. poly8x8_t __ret;
  5691. __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
  5692. return __ret;
  5693. }
  5694. #else
  5695. __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
  5696. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5697. poly8x8_t __ret;
  5698. __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
  5699. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  5700. return __ret;
  5701. }
  5702. __ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
  5703. poly8x8_t __ret;
  5704. __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
  5705. return __ret;
  5706. }
  5707. #endif
  5708. #ifdef __LITTLE_ENDIAN__
  5709. __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
  5710. poly16x4_t __ret;
  5711. __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
  5712. return __ret;
  5713. }
  5714. #else
  5715. __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
  5716. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  5717. poly16x4_t __ret;
  5718. __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
  5719. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5720. return __ret;
  5721. }
  5722. #endif
  5723. #ifdef __LITTLE_ENDIAN__
  5724. __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
  5725. uint8x8_t __ret;
  5726. __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
  5727. return __ret;
  5728. }
  5729. #else
  5730. __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
  5731. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5732. uint8x8_t __ret;
  5733. __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
  5734. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  5735. return __ret;
  5736. }
  5737. __ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
  5738. uint8x8_t __ret;
  5739. __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
  5740. return __ret;
  5741. }
  5742. #endif
  5743. #ifdef __LITTLE_ENDIAN__
  5744. __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
  5745. uint32x2_t __ret;
  5746. __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
  5747. return __ret;
  5748. }
  5749. #else
  5750. __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
  5751. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  5752. uint32x2_t __ret;
  5753. __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
  5754. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  5755. return __ret;
  5756. }
  5757. __ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
  5758. uint32x2_t __ret;
  5759. __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
  5760. return __ret;
  5761. }
  5762. #endif
  5763. #ifdef __LITTLE_ENDIAN__
  5764. __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
  5765. uint64x1_t __ret;
  5766. __ret = __builtin_shufflevector(__p0, __p0, 1);
  5767. return __ret;
  5768. }
  5769. #else
  5770. __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
  5771. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  5772. uint64x1_t __ret;
  5773. __ret = __builtin_shufflevector(__rev0, __rev0, 1);
  5774. return __ret;
  5775. }
  5776. #endif
  5777. #ifdef __LITTLE_ENDIAN__
  5778. __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
  5779. uint16x4_t __ret;
  5780. __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
  5781. return __ret;
  5782. }
  5783. #else
  5784. __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
  5785. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  5786. uint16x4_t __ret;
  5787. __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
  5788. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5789. return __ret;
  5790. }
  5791. __ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
  5792. uint16x4_t __ret;
  5793. __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
  5794. return __ret;
  5795. }
  5796. #endif
  5797. #ifdef __LITTLE_ENDIAN__
  5798. __ai int8x8_t vget_high_s8(int8x16_t __p0) {
  5799. int8x8_t __ret;
  5800. __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
  5801. return __ret;
  5802. }
  5803. #else
  5804. __ai int8x8_t vget_high_s8(int8x16_t __p0) {
  5805. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  5806. int8x8_t __ret;
  5807. __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
  5808. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  5809. return __ret;
  5810. }
  5811. __ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
  5812. int8x8_t __ret;
  5813. __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
  5814. return __ret;
  5815. }
  5816. #endif
  5817. #ifdef __LITTLE_ENDIAN__
  5818. __ai float32x2_t vget_high_f32(float32x4_t __p0) {
  5819. float32x2_t __ret;
  5820. __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
  5821. return __ret;
  5822. }
  5823. #else
  5824. __ai float32x2_t vget_high_f32(float32x4_t __p0) {
  5825. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  5826. float32x2_t __ret;
  5827. __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
  5828. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  5829. return __ret;
  5830. }
  5831. __ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
  5832. float32x2_t __ret;
  5833. __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
  5834. return __ret;
  5835. }
  5836. #endif
  5837. #ifdef __LITTLE_ENDIAN__
  5838. __ai float16x4_t vget_high_f16(float16x8_t __p0) {
  5839. float16x4_t __ret;
  5840. __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
  5841. return __ret;
  5842. }
  5843. #else
  5844. __ai float16x4_t vget_high_f16(float16x8_t __p0) {
  5845. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  5846. float16x4_t __ret;
  5847. __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
  5848. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5849. return __ret;
  5850. }
  5851. __ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
  5852. float16x4_t __ret;
  5853. __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
  5854. return __ret;
  5855. }
  5856. #endif
  5857. #ifdef __LITTLE_ENDIAN__
  5858. __ai int32x2_t vget_high_s32(int32x4_t __p0) {
  5859. int32x2_t __ret;
  5860. __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
  5861. return __ret;
  5862. }
  5863. #else
  5864. __ai int32x2_t vget_high_s32(int32x4_t __p0) {
  5865. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  5866. int32x2_t __ret;
  5867. __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
  5868. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  5869. return __ret;
  5870. }
  5871. __ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
  5872. int32x2_t __ret;
  5873. __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
  5874. return __ret;
  5875. }
  5876. #endif
  5877. #ifdef __LITTLE_ENDIAN__
  5878. __ai int64x1_t vget_high_s64(int64x2_t __p0) {
  5879. int64x1_t __ret;
  5880. __ret = __builtin_shufflevector(__p0, __p0, 1);
  5881. return __ret;
  5882. }
  5883. #else
  5884. __ai int64x1_t vget_high_s64(int64x2_t __p0) {
  5885. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  5886. int64x1_t __ret;
  5887. __ret = __builtin_shufflevector(__rev0, __rev0, 1);
  5888. return __ret;
  5889. }
  5890. #endif
  5891. #ifdef __LITTLE_ENDIAN__
  5892. __ai int16x4_t vget_high_s16(int16x8_t __p0) {
  5893. int16x4_t __ret;
  5894. __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
  5895. return __ret;
  5896. }
  5897. #else
  5898. __ai int16x4_t vget_high_s16(int16x8_t __p0) {
  5899. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  5900. int16x4_t __ret;
  5901. __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
  5902. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  5903. return __ret;
  5904. }
  5905. __ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
  5906. int16x4_t __ret;
  5907. __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
  5908. return __ret;
  5909. }
  5910. #endif
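/* Editorial note: vget_lane_* and vgetq_lane_* below extract a single scalar
 * lane through __builtin_neon_vget_lane_* / __builtin_neon_vgetq_lane_*.
 * They are macros rather than functions because the lane index must be an
 * integer constant expression in range for the vector type; on big-endian
 * targets the vector is lane-reversed first so the index keeps its
 * memory-order meaning, and the single-lane 64-bit types need no reversal.
 *
 * Illustrative use, assuming <arm_neon.h> on a NEON-enabled target
 * (demo_third_lane is a hypothetical name):
 *
 *   float32_t demo_third_lane(float32x4_t v) {
 *       return vgetq_lane_f32(v, 2);   // index must be a constant, 0..3
 *   }
 */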
  5911. #ifdef __LITTLE_ENDIAN__
  5912. #define vget_lane_p8(__p0, __p1) __extension__ ({ \
  5913. poly8x8_t __s0 = __p0; \
  5914. poly8_t __ret; \
  5915. __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
  5916. __ret; \
  5917. })
  5918. #else
  5919. #define vget_lane_p8(__p0, __p1) __extension__ ({ \
  5920. poly8x8_t __s0 = __p0; \
  5921. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  5922. poly8_t __ret; \
  5923. __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
  5924. __ret; \
  5925. })
  5926. #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
  5927. poly8x8_t __s0 = __p0; \
  5928. poly8_t __ret; \
  5929. __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
  5930. __ret; \
  5931. })
  5932. #endif
  5933. #ifdef __LITTLE_ENDIAN__
  5934. #define vget_lane_p16(__p0, __p1) __extension__ ({ \
  5935. poly16x4_t __s0 = __p0; \
  5936. poly16_t __ret; \
  5937. __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
  5938. __ret; \
  5939. })
  5940. #else
  5941. #define vget_lane_p16(__p0, __p1) __extension__ ({ \
  5942. poly16x4_t __s0 = __p0; \
  5943. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  5944. poly16_t __ret; \
  5945. __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
  5946. __ret; \
  5947. })
  5948. #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
  5949. poly16x4_t __s0 = __p0; \
  5950. poly16_t __ret; \
  5951. __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
  5952. __ret; \
  5953. })
  5954. #endif
  5955. #ifdef __LITTLE_ENDIAN__
  5956. #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
  5957. poly8x16_t __s0 = __p0; \
  5958. poly8_t __ret; \
  5959. __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
  5960. __ret; \
  5961. })
  5962. #else
  5963. #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
  5964. poly8x16_t __s0 = __p0; \
  5965. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  5966. poly8_t __ret; \
  5967. __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
  5968. __ret; \
  5969. })
  5970. #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
  5971. poly8x16_t __s0 = __p0; \
  5972. poly8_t __ret; \
  5973. __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
  5974. __ret; \
  5975. })
  5976. #endif
  5977. #ifdef __LITTLE_ENDIAN__
  5978. #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
  5979. poly16x8_t __s0 = __p0; \
  5980. poly16_t __ret; \
  5981. __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
  5982. __ret; \
  5983. })
  5984. #else
  5985. #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
  5986. poly16x8_t __s0 = __p0; \
  5987. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  5988. poly16_t __ret; \
  5989. __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
  5990. __ret; \
  5991. })
  5992. #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
  5993. poly16x8_t __s0 = __p0; \
  5994. poly16_t __ret; \
  5995. __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
  5996. __ret; \
  5997. })
  5998. #endif
  5999. #ifdef __LITTLE_ENDIAN__
  6000. #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
  6001. uint8x16_t __s0 = __p0; \
  6002. uint8_t __ret; \
  6003. __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
  6004. __ret; \
  6005. })
  6006. #else
  6007. #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
  6008. uint8x16_t __s0 = __p0; \
  6009. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  6010. uint8_t __ret; \
  6011. __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
  6012. __ret; \
  6013. })
  6014. #define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
  6015. uint8x16_t __s0 = __p0; \
  6016. uint8_t __ret; \
  6017. __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
  6018. __ret; \
  6019. })
  6020. #endif
  6021. #ifdef __LITTLE_ENDIAN__
  6022. #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
  6023. uint32x4_t __s0 = __p0; \
  6024. uint32_t __ret; \
  6025. __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
  6026. __ret; \
  6027. })
  6028. #else
  6029. #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
  6030. uint32x4_t __s0 = __p0; \
  6031. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  6032. uint32_t __ret; \
  6033. __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
  6034. __ret; \
  6035. })
  6036. #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
  6037. uint32x4_t __s0 = __p0; \
  6038. uint32_t __ret; \
  6039. __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
  6040. __ret; \
  6041. })
  6042. #endif
  6043. #ifdef __LITTLE_ENDIAN__
  6044. #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
  6045. uint64x2_t __s0 = __p0; \
  6046. uint64_t __ret; \
  6047. __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
  6048. __ret; \
  6049. })
  6050. #else
  6051. #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
  6052. uint64x2_t __s0 = __p0; \
  6053. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  6054. uint64_t __ret; \
  6055. __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
  6056. __ret; \
  6057. })
  6058. #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
  6059. uint64x2_t __s0 = __p0; \
  6060. uint64_t __ret; \
  6061. __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
  6062. __ret; \
  6063. })
  6064. #endif
  6065. #ifdef __LITTLE_ENDIAN__
  6066. #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
  6067. uint16x8_t __s0 = __p0; \
  6068. uint16_t __ret; \
  6069. __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
  6070. __ret; \
  6071. })
  6072. #else
  6073. #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
  6074. uint16x8_t __s0 = __p0; \
  6075. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  6076. uint16_t __ret; \
  6077. __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
  6078. __ret; \
  6079. })
  6080. #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
  6081. uint16x8_t __s0 = __p0; \
  6082. uint16_t __ret; \
  6083. __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
  6084. __ret; \
  6085. })
  6086. #endif
  6087. #ifdef __LITTLE_ENDIAN__
  6088. #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
  6089. int8x16_t __s0 = __p0; \
  6090. int8_t __ret; \
  6091. __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
  6092. __ret; \
  6093. })
  6094. #else
  6095. #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
  6096. int8x16_t __s0 = __p0; \
  6097. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  6098. int8_t __ret; \
  6099. __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
  6100. __ret; \
  6101. })
  6102. #define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
  6103. int8x16_t __s0 = __p0; \
  6104. int8_t __ret; \
  6105. __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
  6106. __ret; \
  6107. })
  6108. #endif
  6109. #ifdef __LITTLE_ENDIAN__
  6110. #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
  6111. float32x4_t __s0 = __p0; \
  6112. float32_t __ret; \
  6113. __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
  6114. __ret; \
  6115. })
  6116. #else
  6117. #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
  6118. float32x4_t __s0 = __p0; \
  6119. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  6120. float32_t __ret; \
  6121. __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
  6122. __ret; \
  6123. })
  6124. #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
  6125. float32x4_t __s0 = __p0; \
  6126. float32_t __ret; \
  6127. __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
  6128. __ret; \
  6129. })
  6130. #endif
  6131. #ifdef __LITTLE_ENDIAN__
  6132. #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
  6133. int32x4_t __s0 = __p0; \
  6134. int32_t __ret; \
  6135. __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
  6136. __ret; \
  6137. })
  6138. #else
  6139. #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
  6140. int32x4_t __s0 = __p0; \
  6141. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  6142. int32_t __ret; \
  6143. __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
  6144. __ret; \
  6145. })
  6146. #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
  6147. int32x4_t __s0 = __p0; \
  6148. int32_t __ret; \
  6149. __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
  6150. __ret; \
  6151. })
  6152. #endif
  6153. #ifdef __LITTLE_ENDIAN__
  6154. #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
  6155. int64x2_t __s0 = __p0; \
  6156. int64_t __ret; \
  6157. __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
  6158. __ret; \
  6159. })
  6160. #else
  6161. #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
  6162. int64x2_t __s0 = __p0; \
  6163. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  6164. int64_t __ret; \
  6165. __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
  6166. __ret; \
  6167. })
  6168. #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
  6169. int64x2_t __s0 = __p0; \
  6170. int64_t __ret; \
  6171. __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
  6172. __ret; \
  6173. })
  6174. #endif
  6175. #ifdef __LITTLE_ENDIAN__
  6176. #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
  6177. int16x8_t __s0 = __p0; \
  6178. int16_t __ret; \
  6179. __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
  6180. __ret; \
  6181. })
  6182. #else
  6183. #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
  6184. int16x8_t __s0 = __p0; \
  6185. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  6186. int16_t __ret; \
  6187. __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
  6188. __ret; \
  6189. })
  6190. #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
  6191. int16x8_t __s0 = __p0; \
  6192. int16_t __ret; \
  6193. __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
  6194. __ret; \
  6195. })
  6196. #endif
  6197. #ifdef __LITTLE_ENDIAN__
  6198. #define vget_lane_u8(__p0, __p1) __extension__ ({ \
  6199. uint8x8_t __s0 = __p0; \
  6200. uint8_t __ret; \
  6201. __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
  6202. __ret; \
  6203. })
  6204. #else
  6205. #define vget_lane_u8(__p0, __p1) __extension__ ({ \
  6206. uint8x8_t __s0 = __p0; \
  6207. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  6208. uint8_t __ret; \
  6209. __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
  6210. __ret; \
  6211. })
  6212. #define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
  6213. uint8x8_t __s0 = __p0; \
  6214. uint8_t __ret; \
  6215. __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
  6216. __ret; \
  6217. })
  6218. #endif
  6219. #ifdef __LITTLE_ENDIAN__
  6220. #define vget_lane_u32(__p0, __p1) __extension__ ({ \
  6221. uint32x2_t __s0 = __p0; \
  6222. uint32_t __ret; \
  6223. __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
  6224. __ret; \
  6225. })
  6226. #else
  6227. #define vget_lane_u32(__p0, __p1) __extension__ ({ \
  6228. uint32x2_t __s0 = __p0; \
  6229. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  6230. uint32_t __ret; \
  6231. __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
  6232. __ret; \
  6233. })
  6234. #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
  6235. uint32x2_t __s0 = __p0; \
  6236. uint32_t __ret; \
  6237. __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
  6238. __ret; \
  6239. })
  6240. #endif
  6241. #ifdef __LITTLE_ENDIAN__
  6242. #define vget_lane_u64(__p0, __p1) __extension__ ({ \
  6243. uint64x1_t __s0 = __p0; \
  6244. uint64_t __ret; \
  6245. __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  6246. __ret; \
  6247. })
  6248. #else
  6249. #define vget_lane_u64(__p0, __p1) __extension__ ({ \
  6250. uint64x1_t __s0 = __p0; \
  6251. uint64_t __ret; \
  6252. __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  6253. __ret; \
  6254. })
  6255. #define __noswap_vget_lane_u64(__p0, __p1) __extension__ ({ \
  6256. uint64x1_t __s0 = __p0; \
  6257. uint64_t __ret; \
  6258. __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  6259. __ret; \
  6260. })
  6261. #endif
  6262. #ifdef __LITTLE_ENDIAN__
  6263. #define vget_lane_u16(__p0, __p1) __extension__ ({ \
  6264. uint16x4_t __s0 = __p0; \
  6265. uint16_t __ret; \
  6266. __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
  6267. __ret; \
  6268. })
  6269. #else
  6270. #define vget_lane_u16(__p0, __p1) __extension__ ({ \
  6271. uint16x4_t __s0 = __p0; \
  6272. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  6273. uint16_t __ret; \
  6274. __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
  6275. __ret; \
  6276. })
  6277. #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
  6278. uint16x4_t __s0 = __p0; \
  6279. uint16_t __ret; \
  6280. __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
  6281. __ret; \
  6282. })
  6283. #endif
  6284. #ifdef __LITTLE_ENDIAN__
  6285. #define vget_lane_s8(__p0, __p1) __extension__ ({ \
  6286. int8x8_t __s0 = __p0; \
  6287. int8_t __ret; \
  6288. __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
  6289. __ret; \
  6290. })
  6291. #else
  6292. #define vget_lane_s8(__p0, __p1) __extension__ ({ \
  6293. int8x8_t __s0 = __p0; \
  6294. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  6295. int8_t __ret; \
  6296. __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
  6297. __ret; \
  6298. })
  6299. #define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
  6300. int8x8_t __s0 = __p0; \
  6301. int8_t __ret; \
  6302. __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
  6303. __ret; \
  6304. })
  6305. #endif
  6306. #ifdef __LITTLE_ENDIAN__
  6307. #define vget_lane_f32(__p0, __p1) __extension__ ({ \
  6308. float32x2_t __s0 = __p0; \
  6309. float32_t __ret; \
  6310. __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
  6311. __ret; \
  6312. })
  6313. #else
  6314. #define vget_lane_f32(__p0, __p1) __extension__ ({ \
  6315. float32x2_t __s0 = __p0; \
  6316. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  6317. float32_t __ret; \
  6318. __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
  6319. __ret; \
  6320. })
  6321. #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
  6322. float32x2_t __s0 = __p0; \
  6323. float32_t __ret; \
  6324. __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
  6325. __ret; \
  6326. })
  6327. #endif
  6328. #ifdef __LITTLE_ENDIAN__
  6329. #define vget_lane_s32(__p0, __p1) __extension__ ({ \
  6330. int32x2_t __s0 = __p0; \
  6331. int32_t __ret; \
  6332. __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
  6333. __ret; \
  6334. })
  6335. #else
  6336. #define vget_lane_s32(__p0, __p1) __extension__ ({ \
  6337. int32x2_t __s0 = __p0; \
  6338. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  6339. int32_t __ret; \
  6340. __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
  6341. __ret; \
  6342. })
  6343. #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
  6344. int32x2_t __s0 = __p0; \
  6345. int32_t __ret; \
  6346. __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
  6347. __ret; \
  6348. })
  6349. #endif
  6350. #ifdef __LITTLE_ENDIAN__
  6351. #define vget_lane_s64(__p0, __p1) __extension__ ({ \
  6352. int64x1_t __s0 = __p0; \
  6353. int64_t __ret; \
  6354. __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  6355. __ret; \
  6356. })
  6357. #else
  6358. #define vget_lane_s64(__p0, __p1) __extension__ ({ \
  6359. int64x1_t __s0 = __p0; \
  6360. int64_t __ret; \
  6361. __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  6362. __ret; \
  6363. })
  6364. #define __noswap_vget_lane_s64(__p0, __p1) __extension__ ({ \
  6365. int64x1_t __s0 = __p0; \
  6366. int64_t __ret; \
  6367. __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  6368. __ret; \
  6369. })
  6370. #endif
  6371. #ifdef __LITTLE_ENDIAN__
  6372. #define vget_lane_s16(__p0, __p1) __extension__ ({ \
  6373. int16x4_t __s0 = __p0; \
  6374. int16_t __ret; \
  6375. __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
  6376. __ret; \
  6377. })
  6378. #else
  6379. #define vget_lane_s16(__p0, __p1) __extension__ ({ \
  6380. int16x4_t __s0 = __p0; \
  6381. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  6382. int16_t __ret; \
  6383. __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
  6384. __ret; \
  6385. })
  6386. #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
  6387. int16x4_t __s0 = __p0; \
  6388. int16_t __ret; \
  6389. __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
  6390. __ret; \
  6391. })
  6392. #endif
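/* Editorial note: the vget_low_* functions below are the counterpart of
 * vget_high_*: they return the lower half (lanes 0..N/2-1) of a 128-bit
 * vector as a 64-bit vector, again using only __builtin_shufflevector.
 *
 * Illustrative use, assuming <arm_neon.h> on a NEON-enabled target
 * (demo_lower_half is a hypothetical name):
 *
 *   int32x2_t demo_lower_half(int32x4_t v) {
 *       return vget_low_s32(v);   // lanes 0..1 of v
 *   }
 */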
  6393. #ifdef __LITTLE_ENDIAN__
  6394. __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
  6395. poly8x8_t __ret;
  6396. __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
  6397. return __ret;
  6398. }
  6399. #else
  6400. __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
  6401. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6402. poly8x8_t __ret;
  6403. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
  6404. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6405. return __ret;
  6406. }
  6407. #endif
  6408. #ifdef __LITTLE_ENDIAN__
  6409. __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
  6410. poly16x4_t __ret;
  6411. __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
  6412. return __ret;
  6413. }
  6414. #else
  6415. __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
  6416. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6417. poly16x4_t __ret;
  6418. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
  6419. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6420. return __ret;
  6421. }
  6422. #endif
  6423. #ifdef __LITTLE_ENDIAN__
  6424. __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
  6425. uint8x8_t __ret;
  6426. __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
  6427. return __ret;
  6428. }
  6429. #else
  6430. __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
  6431. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6432. uint8x8_t __ret;
  6433. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
  6434. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6435. return __ret;
  6436. }
  6437. #endif
  6438. #ifdef __LITTLE_ENDIAN__
  6439. __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
  6440. uint32x2_t __ret;
  6441. __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
  6442. return __ret;
  6443. }
  6444. #else
  6445. __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
  6446. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6447. uint32x2_t __ret;
  6448. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
  6449. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  6450. return __ret;
  6451. }
  6452. #endif
  6453. #ifdef __LITTLE_ENDIAN__
  6454. __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
  6455. uint64x1_t __ret;
  6456. __ret = __builtin_shufflevector(__p0, __p0, 0);
  6457. return __ret;
  6458. }
  6459. #else
  6460. __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
  6461. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  6462. uint64x1_t __ret;
  6463. __ret = __builtin_shufflevector(__rev0, __rev0, 0);
  6464. return __ret;
  6465. }
  6466. #endif
  6467. #ifdef __LITTLE_ENDIAN__
  6468. __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
  6469. uint16x4_t __ret;
  6470. __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
  6471. return __ret;
  6472. }
  6473. #else
  6474. __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
  6475. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6476. uint16x4_t __ret;
  6477. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
  6478. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6479. return __ret;
  6480. }
  6481. #endif
  6482. #ifdef __LITTLE_ENDIAN__
  6483. __ai int8x8_t vget_low_s8(int8x16_t __p0) {
  6484. int8x8_t __ret;
  6485. __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
  6486. return __ret;
  6487. }
  6488. #else
  6489. __ai int8x8_t vget_low_s8(int8x16_t __p0) {
  6490. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6491. int8x8_t __ret;
  6492. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
  6493. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6494. return __ret;
  6495. }
  6496. #endif
  6497. #ifdef __LITTLE_ENDIAN__
  6498. __ai float32x2_t vget_low_f32(float32x4_t __p0) {
  6499. float32x2_t __ret;
  6500. __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
  6501. return __ret;
  6502. }
  6503. #else
  6504. __ai float32x2_t vget_low_f32(float32x4_t __p0) {
  6505. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6506. float32x2_t __ret;
  6507. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
  6508. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  6509. return __ret;
  6510. }
  6511. #endif
  6512. #ifdef __LITTLE_ENDIAN__
  6513. __ai float16x4_t vget_low_f16(float16x8_t __p0) {
  6514. float16x4_t __ret;
  6515. __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
  6516. return __ret;
  6517. }
  6518. #else
  6519. __ai float16x4_t vget_low_f16(float16x8_t __p0) {
  6520. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6521. float16x4_t __ret;
  6522. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
  6523. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6524. return __ret;
  6525. }
  6526. #endif
  6527. #ifdef __LITTLE_ENDIAN__
  6528. __ai int32x2_t vget_low_s32(int32x4_t __p0) {
  6529. int32x2_t __ret;
  6530. __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
  6531. return __ret;
  6532. }
  6533. #else
  6534. __ai int32x2_t vget_low_s32(int32x4_t __p0) {
  6535. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6536. int32x2_t __ret;
  6537. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
  6538. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  6539. return __ret;
  6540. }
  6541. #endif
  6542. #ifdef __LITTLE_ENDIAN__
  6543. __ai int64x1_t vget_low_s64(int64x2_t __p0) {
  6544. int64x1_t __ret;
  6545. __ret = __builtin_shufflevector(__p0, __p0, 0);
  6546. return __ret;
  6547. }
  6548. #else
  6549. __ai int64x1_t vget_low_s64(int64x2_t __p0) {
  6550. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  6551. int64x1_t __ret;
  6552. __ret = __builtin_shufflevector(__rev0, __rev0, 0);
  6553. return __ret;
  6554. }
  6555. #endif
  6556. #ifdef __LITTLE_ENDIAN__
  6557. __ai int16x4_t vget_low_s16(int16x8_t __p0) {
  6558. int16x4_t __ret;
  6559. __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
  6560. return __ret;
  6561. }
  6562. #else
  6563. __ai int16x4_t vget_low_s16(int16x8_t __p0) {
  6564. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6565. int16x4_t __ret;
  6566. __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
  6567. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6568. return __ret;
  6569. }
  6570. #endif
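/* Editorial note: vhaddq_* and vhadd_* below perform the halving add (VHADD):
 * each result lane is the sum shifted right by one bit, computed on a widened
 * intermediate so the addition cannot overflow, then truncated.  As elsewhere
 * in this header, the big-endian variants reverse lanes around the call to
 * __builtin_neon_vhaddq_v / __builtin_neon_vhadd_v, and the final integer
 * argument encodes the element type.
 *
 * Illustrative use, assuming <arm_neon.h> on a NEON-enabled target
 * (demo_average_floor is a hypothetical name):
 *
 *   uint8x16_t demo_average_floor(uint8x16_t a, uint8x16_t b) {
 *       return vhaddq_u8(a, b);   // per-lane floor((a + b) / 2)
 *   }
 */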
  6571. #ifdef __LITTLE_ENDIAN__
  6572. __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  6573. uint8x16_t __ret;
  6574. __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  6575. return __ret;
  6576. }
  6577. #else
  6578. __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  6579. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6580. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6581. uint8x16_t __ret;
  6582. __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  6583. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6584. return __ret;
  6585. }
  6586. #endif
  6587. #ifdef __LITTLE_ENDIAN__
  6588. __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  6589. uint32x4_t __ret;
  6590. __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  6591. return __ret;
  6592. }
  6593. #else
  6594. __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  6595. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6596. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  6597. uint32x4_t __ret;
  6598. __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  6599. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6600. return __ret;
  6601. }
  6602. #endif
  6603. #ifdef __LITTLE_ENDIAN__
  6604. __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  6605. uint16x8_t __ret;
  6606. __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  6607. return __ret;
  6608. }
  6609. #else
  6610. __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  6611. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6612. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  6613. uint16x8_t __ret;
  6614. __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  6615. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6616. return __ret;
  6617. }
  6618. #endif
  6619. #ifdef __LITTLE_ENDIAN__
  6620. __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  6621. int8x16_t __ret;
  6622. __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  6623. return __ret;
  6624. }
  6625. #else
  6626. __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  6627. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6628. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6629. int8x16_t __ret;
  6630. __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  6631. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6632. return __ret;
  6633. }
  6634. #endif
  6635. #ifdef __LITTLE_ENDIAN__
  6636. __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  6637. int32x4_t __ret;
  6638. __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  6639. return __ret;
  6640. }
  6641. #else
  6642. __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  6643. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6644. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  6645. int32x4_t __ret;
  6646. __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  6647. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6648. return __ret;
  6649. }
  6650. #endif
  6651. #ifdef __LITTLE_ENDIAN__
  6652. __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  6653. int16x8_t __ret;
  6654. __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  6655. return __ret;
  6656. }
  6657. #else
  6658. __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  6659. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6660. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  6661. int16x8_t __ret;
  6662. __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  6663. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6664. return __ret;
  6665. }
  6666. #endif
  6667. #ifdef __LITTLE_ENDIAN__
  6668. __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  6669. uint8x8_t __ret;
  6670. __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  6671. return __ret;
  6672. }
  6673. #else
  6674. __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  6675. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6676. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  6677. uint8x8_t __ret;
  6678. __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  6679. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6680. return __ret;
  6681. }
  6682. #endif
  6683. #ifdef __LITTLE_ENDIAN__
  6684. __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  6685. uint32x2_t __ret;
  6686. __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  6687. return __ret;
  6688. }
  6689. #else
  6690. __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  6691. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  6692. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  6693. uint32x2_t __ret;
  6694. __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  6695. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  6696. return __ret;
  6697. }
  6698. #endif
  6699. #ifdef __LITTLE_ENDIAN__
  6700. __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  6701. uint16x4_t __ret;
  6702. __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  6703. return __ret;
  6704. }
  6705. #else
  6706. __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  6707. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6708. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  6709. uint16x4_t __ret;
  6710. __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  6711. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6712. return __ret;
  6713. }
  6714. #endif
  6715. #ifdef __LITTLE_ENDIAN__
  6716. __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
  6717. int8x8_t __ret;
  6718. __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  6719. return __ret;
  6720. }
  6721. #else
  6722. __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
  6723. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6724. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  6725. int8x8_t __ret;
  6726. __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  6727. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6728. return __ret;
  6729. }
  6730. #endif
  6731. #ifdef __LITTLE_ENDIAN__
  6732. __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
  6733. int32x2_t __ret;
  6734. __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  6735. return __ret;
  6736. }
  6737. #else
  6738. __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
  6739. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  6740. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  6741. int32x2_t __ret;
  6742. __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  6743. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  6744. return __ret;
  6745. }
  6746. #endif
  6747. #ifdef __LITTLE_ENDIAN__
  6748. __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
  6749. int16x4_t __ret;
  6750. __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  6751. return __ret;
  6752. }
  6753. #else
  6754. __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
  6755. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6756. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  6757. int16x4_t __ret;
  6758. __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  6759. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6760. return __ret;
  6761. }
  6762. #endif
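/*
 * A minimal usage sketch for the halving-add intrinsics above (function and
 * variable names are illustrative only; assumes a NEON-enabled target):
 *
 *   static inline uint8x8_t avg_floor_u8(uint8x8_t a, uint8x8_t b) {
 *       return vhadd_u8(a, b);   // per-lane (a + b) >> 1, no 8-bit overflow
 *   }
 *
 * The vhsub/vhsubq intrinsics that follow are the subtracting counterparts:
 * each result lane is (a - b) >> 1, again with the difference formed before
 * the one-bit shift.
 */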
  6763. #ifdef __LITTLE_ENDIAN__
  6764. __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  6765. uint8x16_t __ret;
  6766. __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  6767. return __ret;
  6768. }
  6769. #else
  6770. __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  6771. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6772. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6773. uint8x16_t __ret;
  6774. __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  6775. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6776. return __ret;
  6777. }
  6778. #endif
  6779. #ifdef __LITTLE_ENDIAN__
  6780. __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  6781. uint32x4_t __ret;
  6782. __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  6783. return __ret;
  6784. }
  6785. #else
  6786. __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  6787. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6788. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  6789. uint32x4_t __ret;
  6790. __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  6791. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6792. return __ret;
  6793. }
  6794. #endif
  6795. #ifdef __LITTLE_ENDIAN__
  6796. __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  6797. uint16x8_t __ret;
  6798. __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  6799. return __ret;
  6800. }
  6801. #else
  6802. __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  6803. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6804. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  6805. uint16x8_t __ret;
  6806. __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  6807. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6808. return __ret;
  6809. }
  6810. #endif
  6811. #ifdef __LITTLE_ENDIAN__
  6812. __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
  6813. int8x16_t __ret;
  6814. __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  6815. return __ret;
  6816. }
  6817. #else
  6818. __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
  6819. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6820. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6821. int8x16_t __ret;
  6822. __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  6823. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  6824. return __ret;
  6825. }
  6826. #endif
  6827. #ifdef __LITTLE_ENDIAN__
  6828. __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  6829. int32x4_t __ret;
  6830. __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  6831. return __ret;
  6832. }
  6833. #else
  6834. __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  6835. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6836. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  6837. int32x4_t __ret;
  6838. __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  6839. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6840. return __ret;
  6841. }
  6842. #endif
  6843. #ifdef __LITTLE_ENDIAN__
  6844. __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  6845. int16x8_t __ret;
  6846. __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  6847. return __ret;
  6848. }
  6849. #else
  6850. __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  6851. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6852. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  6853. int16x8_t __ret;
  6854. __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  6855. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6856. return __ret;
  6857. }
  6858. #endif
  6859. #ifdef __LITTLE_ENDIAN__
  6860. __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
  6861. uint8x8_t __ret;
  6862. __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  6863. return __ret;
  6864. }
  6865. #else
  6866. __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
  6867. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6868. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  6869. uint8x8_t __ret;
  6870. __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  6871. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6872. return __ret;
  6873. }
  6874. #endif
  6875. #ifdef __LITTLE_ENDIAN__
  6876. __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
  6877. uint32x2_t __ret;
  6878. __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  6879. return __ret;
  6880. }
  6881. #else
  6882. __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
  6883. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  6884. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  6885. uint32x2_t __ret;
  6886. __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  6887. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  6888. return __ret;
  6889. }
  6890. #endif
  6891. #ifdef __LITTLE_ENDIAN__
  6892. __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
  6893. uint16x4_t __ret;
  6894. __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  6895. return __ret;
  6896. }
  6897. #else
  6898. __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
  6899. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6900. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  6901. uint16x4_t __ret;
  6902. __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  6903. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6904. return __ret;
  6905. }
  6906. #endif
  6907. #ifdef __LITTLE_ENDIAN__
  6908. __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
  6909. int8x8_t __ret;
  6910. __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  6911. return __ret;
  6912. }
  6913. #else
  6914. __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
  6915. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  6916. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  6917. int8x8_t __ret;
  6918. __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  6919. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  6920. return __ret;
  6921. }
  6922. #endif
  6923. #ifdef __LITTLE_ENDIAN__
  6924. __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
  6925. int32x2_t __ret;
  6926. __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  6927. return __ret;
  6928. }
  6929. #else
  6930. __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
  6931. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  6932. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  6933. int32x2_t __ret;
  6934. __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  6935. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  6936. return __ret;
  6937. }
  6938. #endif
  6939. #ifdef __LITTLE_ENDIAN__
  6940. __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
  6941. int16x4_t __ret;
  6942. __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  6943. return __ret;
  6944. }
  6945. #else
  6946. __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
  6947. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  6948. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  6949. int16x4_t __ret;
  6950. __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  6951. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  6952. return __ret;
  6953. }
  6954. #endif
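/*
 * vld1/vld1q: load one complete 64-bit (D) or 128-bit (Q) vector of
 * consecutive elements from memory; the pointer argument only needs the
 * natural alignment of the element type. A short sketch (pointer names are
 * illustrative; vmulq_n_f32 and vst1q_f32 are defined elsewhere in this
 * header):
 *
 *   float32x4_t v = vld1q_f32(src);   // load src[0..3]
 *   v = vmulq_n_f32(v, 0.5f);         // scale each lane
 *   vst1q_f32(dst, v);                // store back to dst[0..3]
 */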
  6955. #ifdef __LITTLE_ENDIAN__
  6956. #define vld1_p8(__p0) __extension__ ({ \
  6957. poly8x8_t __ret; \
  6958. __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
  6959. __ret; \
  6960. })
  6961. #else
  6962. #define vld1_p8(__p0) __extension__ ({ \
  6963. poly8x8_t __ret; \
  6964. __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
  6965. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  6966. __ret; \
  6967. })
  6968. #endif
  6969. #ifdef __LITTLE_ENDIAN__
  6970. #define vld1_p16(__p0) __extension__ ({ \
  6971. poly16x4_t __ret; \
  6972. __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
  6973. __ret; \
  6974. })
  6975. #else
  6976. #define vld1_p16(__p0) __extension__ ({ \
  6977. poly16x4_t __ret; \
  6978. __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
  6979. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  6980. __ret; \
  6981. })
  6982. #endif
  6983. #ifdef __LITTLE_ENDIAN__
  6984. #define vld1q_p8(__p0) __extension__ ({ \
  6985. poly8x16_t __ret; \
  6986. __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
  6987. __ret; \
  6988. })
  6989. #else
  6990. #define vld1q_p8(__p0) __extension__ ({ \
  6991. poly8x16_t __ret; \
  6992. __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
  6993. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  6994. __ret; \
  6995. })
  6996. #endif
  6997. #ifdef __LITTLE_ENDIAN__
  6998. #define vld1q_p16(__p0) __extension__ ({ \
  6999. poly16x8_t __ret; \
  7000. __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
  7001. __ret; \
  7002. })
  7003. #else
  7004. #define vld1q_p16(__p0) __extension__ ({ \
  7005. poly16x8_t __ret; \
  7006. __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
  7007. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7008. __ret; \
  7009. })
  7010. #endif
  7011. #ifdef __LITTLE_ENDIAN__
  7012. #define vld1q_u8(__p0) __extension__ ({ \
  7013. uint8x16_t __ret; \
  7014. __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
  7015. __ret; \
  7016. })
  7017. #else
  7018. #define vld1q_u8(__p0) __extension__ ({ \
  7019. uint8x16_t __ret; \
  7020. __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
  7021. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7022. __ret; \
  7023. })
  7024. #endif
  7025. #ifdef __LITTLE_ENDIAN__
  7026. #define vld1q_u32(__p0) __extension__ ({ \
  7027. uint32x4_t __ret; \
  7028. __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
  7029. __ret; \
  7030. })
  7031. #else
  7032. #define vld1q_u32(__p0) __extension__ ({ \
  7033. uint32x4_t __ret; \
  7034. __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
  7035. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7036. __ret; \
  7037. })
  7038. #endif
  7039. #ifdef __LITTLE_ENDIAN__
  7040. #define vld1q_u64(__p0) __extension__ ({ \
  7041. uint64x2_t __ret; \
  7042. __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
  7043. __ret; \
  7044. })
  7045. #else
  7046. #define vld1q_u64(__p0) __extension__ ({ \
  7047. uint64x2_t __ret; \
  7048. __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
  7049. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7050. __ret; \
  7051. })
  7052. #endif
  7053. #ifdef __LITTLE_ENDIAN__
  7054. #define vld1q_u16(__p0) __extension__ ({ \
  7055. uint16x8_t __ret; \
  7056. __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
  7057. __ret; \
  7058. })
  7059. #else
  7060. #define vld1q_u16(__p0) __extension__ ({ \
  7061. uint16x8_t __ret; \
  7062. __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
  7063. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7064. __ret; \
  7065. })
  7066. #endif
  7067. #ifdef __LITTLE_ENDIAN__
  7068. #define vld1q_s8(__p0) __extension__ ({ \
  7069. int8x16_t __ret; \
  7070. __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
  7071. __ret; \
  7072. })
  7073. #else
  7074. #define vld1q_s8(__p0) __extension__ ({ \
  7075. int8x16_t __ret; \
  7076. __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
  7077. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7078. __ret; \
  7079. })
  7080. #endif
  7081. #ifdef __LITTLE_ENDIAN__
  7082. #define vld1q_f32(__p0) __extension__ ({ \
  7083. float32x4_t __ret; \
  7084. __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
  7085. __ret; \
  7086. })
  7087. #else
  7088. #define vld1q_f32(__p0) __extension__ ({ \
  7089. float32x4_t __ret; \
  7090. __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
  7091. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7092. __ret; \
  7093. })
  7094. #endif
  7095. #ifdef __LITTLE_ENDIAN__
  7096. #define vld1q_f16(__p0) __extension__ ({ \
  7097. float16x8_t __ret; \
  7098. __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
  7099. __ret; \
  7100. })
  7101. #else
  7102. #define vld1q_f16(__p0) __extension__ ({ \
  7103. float16x8_t __ret; \
  7104. __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
  7105. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7106. __ret; \
  7107. })
  7108. #endif
  7109. #ifdef __LITTLE_ENDIAN__
  7110. #define vld1q_s32(__p0) __extension__ ({ \
  7111. int32x4_t __ret; \
  7112. __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
  7113. __ret; \
  7114. })
  7115. #else
  7116. #define vld1q_s32(__p0) __extension__ ({ \
  7117. int32x4_t __ret; \
  7118. __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
  7119. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7120. __ret; \
  7121. })
  7122. #endif
  7123. #ifdef __LITTLE_ENDIAN__
  7124. #define vld1q_s64(__p0) __extension__ ({ \
  7125. int64x2_t __ret; \
  7126. __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
  7127. __ret; \
  7128. })
  7129. #else
  7130. #define vld1q_s64(__p0) __extension__ ({ \
  7131. int64x2_t __ret; \
  7132. __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
  7133. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7134. __ret; \
  7135. })
  7136. #endif
  7137. #ifdef __LITTLE_ENDIAN__
  7138. #define vld1q_s16(__p0) __extension__ ({ \
  7139. int16x8_t __ret; \
  7140. __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
  7141. __ret; \
  7142. })
  7143. #else
  7144. #define vld1q_s16(__p0) __extension__ ({ \
  7145. int16x8_t __ret; \
  7146. __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
  7147. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7148. __ret; \
  7149. })
  7150. #endif
  7151. #ifdef __LITTLE_ENDIAN__
  7152. #define vld1_u8(__p0) __extension__ ({ \
  7153. uint8x8_t __ret; \
  7154. __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
  7155. __ret; \
  7156. })
  7157. #else
  7158. #define vld1_u8(__p0) __extension__ ({ \
  7159. uint8x8_t __ret; \
  7160. __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
  7161. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7162. __ret; \
  7163. })
  7164. #endif
  7165. #ifdef __LITTLE_ENDIAN__
  7166. #define vld1_u32(__p0) __extension__ ({ \
  7167. uint32x2_t __ret; \
  7168. __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
  7169. __ret; \
  7170. })
  7171. #else
  7172. #define vld1_u32(__p0) __extension__ ({ \
  7173. uint32x2_t __ret; \
  7174. __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
  7175. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7176. __ret; \
  7177. })
  7178. #endif
  7179. #ifdef __LITTLE_ENDIAN__
  7180. #define vld1_u64(__p0) __extension__ ({ \
  7181. uint64x1_t __ret; \
  7182. __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
  7183. __ret; \
  7184. })
  7185. #else
  7186. #define vld1_u64(__p0) __extension__ ({ \
  7187. uint64x1_t __ret; \
  7188. __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
  7189. __ret; \
  7190. })
  7191. #endif
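/*
 * uint64x1_t (and int64x1_t below) hold a single lane, so their layout is
 * the same on either endianness; that is why the #else branch for these
 * types performs no __builtin_shufflevector.
 */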
  7192. #ifdef __LITTLE_ENDIAN__
  7193. #define vld1_u16(__p0) __extension__ ({ \
  7194. uint16x4_t __ret; \
  7195. __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
  7196. __ret; \
  7197. })
  7198. #else
  7199. #define vld1_u16(__p0) __extension__ ({ \
  7200. uint16x4_t __ret; \
  7201. __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
  7202. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7203. __ret; \
  7204. })
  7205. #endif
  7206. #ifdef __LITTLE_ENDIAN__
  7207. #define vld1_s8(__p0) __extension__ ({ \
  7208. int8x8_t __ret; \
  7209. __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
  7210. __ret; \
  7211. })
  7212. #else
  7213. #define vld1_s8(__p0) __extension__ ({ \
  7214. int8x8_t __ret; \
  7215. __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
  7216. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7217. __ret; \
  7218. })
  7219. #endif
  7220. #ifdef __LITTLE_ENDIAN__
  7221. #define vld1_f32(__p0) __extension__ ({ \
  7222. float32x2_t __ret; \
  7223. __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
  7224. __ret; \
  7225. })
  7226. #else
  7227. #define vld1_f32(__p0) __extension__ ({ \
  7228. float32x2_t __ret; \
  7229. __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
  7230. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7231. __ret; \
  7232. })
  7233. #endif
  7234. #ifdef __LITTLE_ENDIAN__
  7235. #define vld1_f16(__p0) __extension__ ({ \
  7236. float16x4_t __ret; \
  7237. __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
  7238. __ret; \
  7239. })
  7240. #else
  7241. #define vld1_f16(__p0) __extension__ ({ \
  7242. float16x4_t __ret; \
  7243. __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
  7244. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7245. __ret; \
  7246. })
  7247. #endif
  7248. #ifdef __LITTLE_ENDIAN__
  7249. #define vld1_s32(__p0) __extension__ ({ \
  7250. int32x2_t __ret; \
  7251. __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
  7252. __ret; \
  7253. })
  7254. #else
  7255. #define vld1_s32(__p0) __extension__ ({ \
  7256. int32x2_t __ret; \
  7257. __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
  7258. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7259. __ret; \
  7260. })
  7261. #endif
  7262. #ifdef __LITTLE_ENDIAN__
  7263. #define vld1_s64(__p0) __extension__ ({ \
  7264. int64x1_t __ret; \
  7265. __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
  7266. __ret; \
  7267. })
  7268. #else
  7269. #define vld1_s64(__p0) __extension__ ({ \
  7270. int64x1_t __ret; \
  7271. __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
  7272. __ret; \
  7273. })
  7274. #endif
  7275. #ifdef __LITTLE_ENDIAN__
  7276. #define vld1_s16(__p0) __extension__ ({ \
  7277. int16x4_t __ret; \
  7278. __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
  7279. __ret; \
  7280. })
  7281. #else
  7282. #define vld1_s16(__p0) __extension__ ({ \
  7283. int16x4_t __ret; \
  7284. __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
  7285. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7286. __ret; \
  7287. })
  7288. #endif
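/*
 * vld1_dup/vld1q_dup: load a single element from memory and replicate it
 * into every lane of the result, e.g. (illustrative name):
 *
 *   float32x4_t gain4 = vld1q_dup_f32(&gain);   // all four lanes = gain
 */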
  7289. #ifdef __LITTLE_ENDIAN__
  7290. #define vld1_dup_p8(__p0) __extension__ ({ \
  7291. poly8x8_t __ret; \
  7292. __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
  7293. __ret; \
  7294. })
  7295. #else
  7296. #define vld1_dup_p8(__p0) __extension__ ({ \
  7297. poly8x8_t __ret; \
  7298. __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
  7299. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7300. __ret; \
  7301. })
  7302. #endif
  7303. #ifdef __LITTLE_ENDIAN__
  7304. #define vld1_dup_p16(__p0) __extension__ ({ \
  7305. poly16x4_t __ret; \
  7306. __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
  7307. __ret; \
  7308. })
  7309. #else
  7310. #define vld1_dup_p16(__p0) __extension__ ({ \
  7311. poly16x4_t __ret; \
  7312. __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
  7313. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7314. __ret; \
  7315. })
  7316. #endif
  7317. #ifdef __LITTLE_ENDIAN__
  7318. #define vld1q_dup_p8(__p0) __extension__ ({ \
  7319. poly8x16_t __ret; \
  7320. __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
  7321. __ret; \
  7322. })
  7323. #else
  7324. #define vld1q_dup_p8(__p0) __extension__ ({ \
  7325. poly8x16_t __ret; \
  7326. __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
  7327. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7328. __ret; \
  7329. })
  7330. #endif
  7331. #ifdef __LITTLE_ENDIAN__
  7332. #define vld1q_dup_p16(__p0) __extension__ ({ \
  7333. poly16x8_t __ret; \
  7334. __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
  7335. __ret; \
  7336. })
  7337. #else
  7338. #define vld1q_dup_p16(__p0) __extension__ ({ \
  7339. poly16x8_t __ret; \
  7340. __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
  7341. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7342. __ret; \
  7343. })
  7344. #endif
  7345. #ifdef __LITTLE_ENDIAN__
  7346. #define vld1q_dup_u8(__p0) __extension__ ({ \
  7347. uint8x16_t __ret; \
  7348. __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
  7349. __ret; \
  7350. })
  7351. #else
  7352. #define vld1q_dup_u8(__p0) __extension__ ({ \
  7353. uint8x16_t __ret; \
  7354. __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
  7355. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7356. __ret; \
  7357. })
  7358. #endif
  7359. #ifdef __LITTLE_ENDIAN__
  7360. #define vld1q_dup_u32(__p0) __extension__ ({ \
  7361. uint32x4_t __ret; \
  7362. __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
  7363. __ret; \
  7364. })
  7365. #else
  7366. #define vld1q_dup_u32(__p0) __extension__ ({ \
  7367. uint32x4_t __ret; \
  7368. __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
  7369. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7370. __ret; \
  7371. })
  7372. #endif
  7373. #ifdef __LITTLE_ENDIAN__
  7374. #define vld1q_dup_u64(__p0) __extension__ ({ \
  7375. uint64x2_t __ret; \
  7376. __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
  7377. __ret; \
  7378. })
  7379. #else
  7380. #define vld1q_dup_u64(__p0) __extension__ ({ \
  7381. uint64x2_t __ret; \
  7382. __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
  7383. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7384. __ret; \
  7385. })
  7386. #endif
  7387. #ifdef __LITTLE_ENDIAN__
  7388. #define vld1q_dup_u16(__p0) __extension__ ({ \
  7389. uint16x8_t __ret; \
  7390. __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
  7391. __ret; \
  7392. })
  7393. #else
  7394. #define vld1q_dup_u16(__p0) __extension__ ({ \
  7395. uint16x8_t __ret; \
  7396. __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
  7397. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7398. __ret; \
  7399. })
  7400. #endif
  7401. #ifdef __LITTLE_ENDIAN__
  7402. #define vld1q_dup_s8(__p0) __extension__ ({ \
  7403. int8x16_t __ret; \
  7404. __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
  7405. __ret; \
  7406. })
  7407. #else
  7408. #define vld1q_dup_s8(__p0) __extension__ ({ \
  7409. int8x16_t __ret; \
  7410. __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
  7411. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7412. __ret; \
  7413. })
  7414. #endif
  7415. #ifdef __LITTLE_ENDIAN__
  7416. #define vld1q_dup_f32(__p0) __extension__ ({ \
  7417. float32x4_t __ret; \
  7418. __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
  7419. __ret; \
  7420. })
  7421. #else
  7422. #define vld1q_dup_f32(__p0) __extension__ ({ \
  7423. float32x4_t __ret; \
  7424. __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
  7425. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7426. __ret; \
  7427. })
  7428. #endif
  7429. #ifdef __LITTLE_ENDIAN__
  7430. #define vld1q_dup_f16(__p0) __extension__ ({ \
  7431. float16x8_t __ret; \
  7432. __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
  7433. __ret; \
  7434. })
  7435. #else
  7436. #define vld1q_dup_f16(__p0) __extension__ ({ \
  7437. float16x8_t __ret; \
  7438. __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
  7439. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7440. __ret; \
  7441. })
  7442. #endif
  7443. #ifdef __LITTLE_ENDIAN__
  7444. #define vld1q_dup_s32(__p0) __extension__ ({ \
  7445. int32x4_t __ret; \
  7446. __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
  7447. __ret; \
  7448. })
  7449. #else
  7450. #define vld1q_dup_s32(__p0) __extension__ ({ \
  7451. int32x4_t __ret; \
  7452. __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
  7453. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7454. __ret; \
  7455. })
  7456. #endif
  7457. #ifdef __LITTLE_ENDIAN__
  7458. #define vld1q_dup_s64(__p0) __extension__ ({ \
  7459. int64x2_t __ret; \
  7460. __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
  7461. __ret; \
  7462. })
  7463. #else
  7464. #define vld1q_dup_s64(__p0) __extension__ ({ \
  7465. int64x2_t __ret; \
  7466. __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
  7467. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7468. __ret; \
  7469. })
  7470. #endif
  7471. #ifdef __LITTLE_ENDIAN__
  7472. #define vld1q_dup_s16(__p0) __extension__ ({ \
  7473. int16x8_t __ret; \
  7474. __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
  7475. __ret; \
  7476. })
  7477. #else
  7478. #define vld1q_dup_s16(__p0) __extension__ ({ \
  7479. int16x8_t __ret; \
  7480. __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
  7481. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7482. __ret; \
  7483. })
  7484. #endif
  7485. #ifdef __LITTLE_ENDIAN__
  7486. #define vld1_dup_u8(__p0) __extension__ ({ \
  7487. uint8x8_t __ret; \
  7488. __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
  7489. __ret; \
  7490. })
  7491. #else
  7492. #define vld1_dup_u8(__p0) __extension__ ({ \
  7493. uint8x8_t __ret; \
  7494. __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
  7495. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7496. __ret; \
  7497. })
  7498. #endif
  7499. #ifdef __LITTLE_ENDIAN__
  7500. #define vld1_dup_u32(__p0) __extension__ ({ \
  7501. uint32x2_t __ret; \
  7502. __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
  7503. __ret; \
  7504. })
  7505. #else
  7506. #define vld1_dup_u32(__p0) __extension__ ({ \
  7507. uint32x2_t __ret; \
  7508. __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
  7509. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7510. __ret; \
  7511. })
  7512. #endif
  7513. #ifdef __LITTLE_ENDIAN__
  7514. #define vld1_dup_u64(__p0) __extension__ ({ \
  7515. uint64x1_t __ret; \
  7516. __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
  7517. __ret; \
  7518. })
  7519. #else
  7520. #define vld1_dup_u64(__p0) __extension__ ({ \
  7521. uint64x1_t __ret; \
  7522. __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
  7523. __ret; \
  7524. })
  7525. #endif
  7526. #ifdef __LITTLE_ENDIAN__
  7527. #define vld1_dup_u16(__p0) __extension__ ({ \
  7528. uint16x4_t __ret; \
  7529. __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
  7530. __ret; \
  7531. })
  7532. #else
  7533. #define vld1_dup_u16(__p0) __extension__ ({ \
  7534. uint16x4_t __ret; \
  7535. __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
  7536. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7537. __ret; \
  7538. })
  7539. #endif
  7540. #ifdef __LITTLE_ENDIAN__
  7541. #define vld1_dup_s8(__p0) __extension__ ({ \
  7542. int8x8_t __ret; \
  7543. __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
  7544. __ret; \
  7545. })
  7546. #else
  7547. #define vld1_dup_s8(__p0) __extension__ ({ \
  7548. int8x8_t __ret; \
  7549. __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
  7550. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7551. __ret; \
  7552. })
  7553. #endif
  7554. #ifdef __LITTLE_ENDIAN__
  7555. #define vld1_dup_f32(__p0) __extension__ ({ \
  7556. float32x2_t __ret; \
  7557. __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
  7558. __ret; \
  7559. })
  7560. #else
  7561. #define vld1_dup_f32(__p0) __extension__ ({ \
  7562. float32x2_t __ret; \
  7563. __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
  7564. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7565. __ret; \
  7566. })
  7567. #endif
  7568. #ifdef __LITTLE_ENDIAN__
  7569. #define vld1_dup_f16(__p0) __extension__ ({ \
  7570. float16x4_t __ret; \
  7571. __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
  7572. __ret; \
  7573. })
  7574. #else
  7575. #define vld1_dup_f16(__p0) __extension__ ({ \
  7576. float16x4_t __ret; \
  7577. __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
  7578. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7579. __ret; \
  7580. })
  7581. #endif
  7582. #ifdef __LITTLE_ENDIAN__
  7583. #define vld1_dup_s32(__p0) __extension__ ({ \
  7584. int32x2_t __ret; \
  7585. __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
  7586. __ret; \
  7587. })
  7588. #else
  7589. #define vld1_dup_s32(__p0) __extension__ ({ \
  7590. int32x2_t __ret; \
  7591. __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
  7592. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7593. __ret; \
  7594. })
  7595. #endif
  7596. #ifdef __LITTLE_ENDIAN__
  7597. #define vld1_dup_s64(__p0) __extension__ ({ \
  7598. int64x1_t __ret; \
  7599. __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
  7600. __ret; \
  7601. })
  7602. #else
  7603. #define vld1_dup_s64(__p0) __extension__ ({ \
  7604. int64x1_t __ret; \
  7605. __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
  7606. __ret; \
  7607. })
  7608. #endif
  7609. #ifdef __LITTLE_ENDIAN__
  7610. #define vld1_dup_s16(__p0) __extension__ ({ \
  7611. int16x4_t __ret; \
  7612. __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
  7613. __ret; \
  7614. })
  7615. #else
  7616. #define vld1_dup_s16(__p0) __extension__ ({ \
  7617. int16x4_t __ret; \
  7618. __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
  7619. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7620. __ret; \
  7621. })
  7622. #endif
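/*
 * vld1_lane/vld1q_lane: load one element from memory into lane __p2 of the
 * vector given as __p1, leaving the other lanes unchanged; the lane index
 * must be a compile-time constant. Sketch (illustrative names):
 *
 *   v = vld1q_lane_f32(ptr, v, 0);   // replace lane 0 of v with *ptr
 */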
  7623. #ifdef __LITTLE_ENDIAN__
  7624. #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  7625. poly8x8_t __s1 = __p1; \
  7626. poly8x8_t __ret; \
  7627. __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
  7628. __ret; \
  7629. })
  7630. #else
  7631. #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  7632. poly8x8_t __s1 = __p1; \
  7633. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  7634. poly8x8_t __ret; \
  7635. __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
  7636. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7637. __ret; \
  7638. })
  7639. #endif
  7640. #ifdef __LITTLE_ENDIAN__
  7641. #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  7642. poly16x4_t __s1 = __p1; \
  7643. poly16x4_t __ret; \
  7644. __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
  7645. __ret; \
  7646. })
  7647. #else
  7648. #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  7649. poly16x4_t __s1 = __p1; \
  7650. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  7651. poly16x4_t __ret; \
  7652. __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
  7653. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7654. __ret; \
  7655. })
  7656. #endif
  7657. #ifdef __LITTLE_ENDIAN__
  7658. #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  7659. poly8x16_t __s1 = __p1; \
  7660. poly8x16_t __ret; \
  7661. __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
  7662. __ret; \
  7663. })
  7664. #else
  7665. #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  7666. poly8x16_t __s1 = __p1; \
  7667. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7668. poly8x16_t __ret; \
  7669. __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
  7670. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7671. __ret; \
  7672. })
  7673. #endif
  7674. #ifdef __LITTLE_ENDIAN__
  7675. #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  7676. poly16x8_t __s1 = __p1; \
  7677. poly16x8_t __ret; \
  7678. __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
  7679. __ret; \
  7680. })
  7681. #else
  7682. #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  7683. poly16x8_t __s1 = __p1; \
  7684. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  7685. poly16x8_t __ret; \
  7686. __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
  7687. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7688. __ret; \
  7689. })
  7690. #endif
  7691. #ifdef __LITTLE_ENDIAN__
  7692. #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  7693. uint8x16_t __s1 = __p1; \
  7694. uint8x16_t __ret; \
  7695. __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
  7696. __ret; \
  7697. })
  7698. #else
  7699. #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  7700. uint8x16_t __s1 = __p1; \
  7701. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7702. uint8x16_t __ret; \
  7703. __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
  7704. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7705. __ret; \
  7706. })
  7707. #endif
  7708. #ifdef __LITTLE_ENDIAN__
  7709. #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  7710. uint32x4_t __s1 = __p1; \
  7711. uint32x4_t __ret; \
  7712. __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
  7713. __ret; \
  7714. })
  7715. #else
  7716. #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  7717. uint32x4_t __s1 = __p1; \
  7718. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  7719. uint32x4_t __ret; \
  7720. __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
  7721. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7722. __ret; \
  7723. })
  7724. #endif
  7725. #ifdef __LITTLE_ENDIAN__
  7726. #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  7727. uint64x2_t __s1 = __p1; \
  7728. uint64x2_t __ret; \
  7729. __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
  7730. __ret; \
  7731. })
  7732. #else
  7733. #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  7734. uint64x2_t __s1 = __p1; \
  7735. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  7736. uint64x2_t __ret; \
  7737. __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
  7738. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7739. __ret; \
  7740. })
  7741. #endif
  7742. #ifdef __LITTLE_ENDIAN__
  7743. #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  7744. uint16x8_t __s1 = __p1; \
  7745. uint16x8_t __ret; \
  7746. __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
  7747. __ret; \
  7748. })
  7749. #else
  7750. #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  7751. uint16x8_t __s1 = __p1; \
  7752. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  7753. uint16x8_t __ret; \
  7754. __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
  7755. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7756. __ret; \
  7757. })
  7758. #endif
  7759. #ifdef __LITTLE_ENDIAN__
  7760. #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  7761. int8x16_t __s1 = __p1; \
  7762. int8x16_t __ret; \
  7763. __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
  7764. __ret; \
  7765. })
  7766. #else
  7767. #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  7768. int8x16_t __s1 = __p1; \
  7769. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7770. int8x16_t __ret; \
  7771. __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
  7772. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  7773. __ret; \
  7774. })
  7775. #endif
  7776. #ifdef __LITTLE_ENDIAN__
  7777. #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  7778. float32x4_t __s1 = __p1; \
  7779. float32x4_t __ret; \
  7780. __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
  7781. __ret; \
  7782. })
  7783. #else
  7784. #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  7785. float32x4_t __s1 = __p1; \
  7786. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  7787. float32x4_t __ret; \
  7788. __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
  7789. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7790. __ret; \
  7791. })
  7792. #endif
  7793. #ifdef __LITTLE_ENDIAN__
  7794. #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  7795. float16x8_t __s1 = __p1; \
  7796. float16x8_t __ret; \
  7797. __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
  7798. __ret; \
  7799. })
  7800. #else
  7801. #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  7802. float16x8_t __s1 = __p1; \
  7803. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  7804. float16x8_t __ret; \
  7805. __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
  7806. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7807. __ret; \
  7808. })
  7809. #endif
  7810. #ifdef __LITTLE_ENDIAN__
  7811. #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  7812. int32x4_t __s1 = __p1; \
  7813. int32x4_t __ret; \
  7814. __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
  7815. __ret; \
  7816. })
  7817. #else
  7818. #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  7819. int32x4_t __s1 = __p1; \
  7820. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  7821. int32x4_t __ret; \
  7822. __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
  7823. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7824. __ret; \
  7825. })
  7826. #endif
  7827. #ifdef __LITTLE_ENDIAN__
  7828. #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  7829. int64x2_t __s1 = __p1; \
  7830. int64x2_t __ret; \
  7831. __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
  7832. __ret; \
  7833. })
  7834. #else
  7835. #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  7836. int64x2_t __s1 = __p1; \
  7837. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  7838. int64x2_t __ret; \
  7839. __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
  7840. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7841. __ret; \
  7842. })
  7843. #endif
  7844. #ifdef __LITTLE_ENDIAN__
  7845. #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  7846. int16x8_t __s1 = __p1; \
  7847. int16x8_t __ret; \
  7848. __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
  7849. __ret; \
  7850. })
  7851. #else
  7852. #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  7853. int16x8_t __s1 = __p1; \
  7854. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  7855. int16x8_t __ret; \
  7856. __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
  7857. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7858. __ret; \
  7859. })
  7860. #endif
  7861. #ifdef __LITTLE_ENDIAN__
  7862. #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  7863. uint8x8_t __s1 = __p1; \
  7864. uint8x8_t __ret; \
  7865. __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
  7866. __ret; \
  7867. })
  7868. #else
  7869. #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  7870. uint8x8_t __s1 = __p1; \
  7871. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  7872. uint8x8_t __ret; \
  7873. __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
  7874. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7875. __ret; \
  7876. })
  7877. #endif
  7878. #ifdef __LITTLE_ENDIAN__
  7879. #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  7880. uint32x2_t __s1 = __p1; \
  7881. uint32x2_t __ret; \
  7882. __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
  7883. __ret; \
  7884. })
  7885. #else
  7886. #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  7887. uint32x2_t __s1 = __p1; \
  7888. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  7889. uint32x2_t __ret; \
  7890. __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
  7891. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7892. __ret; \
  7893. })
  7894. #endif
  7895. #ifdef __LITTLE_ENDIAN__
  7896. #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  7897. uint64x1_t __s1 = __p1; \
  7898. uint64x1_t __ret; \
  7899. __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
  7900. __ret; \
  7901. })
  7902. #else
  7903. #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  7904. uint64x1_t __s1 = __p1; \
  7905. uint64x1_t __ret; \
  7906. __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
  7907. __ret; \
  7908. })
  7909. #endif
  7910. #ifdef __LITTLE_ENDIAN__
  7911. #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  7912. uint16x4_t __s1 = __p1; \
  7913. uint16x4_t __ret; \
  7914. __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
  7915. __ret; \
  7916. })
  7917. #else
  7918. #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  7919. uint16x4_t __s1 = __p1; \
  7920. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  7921. uint16x4_t __ret; \
  7922. __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
  7923. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7924. __ret; \
  7925. })
  7926. #endif
  7927. #ifdef __LITTLE_ENDIAN__
  7928. #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  7929. int8x8_t __s1 = __p1; \
  7930. int8x8_t __ret; \
  7931. __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
  7932. __ret; \
  7933. })
  7934. #else
  7935. #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  7936. int8x8_t __s1 = __p1; \
  7937. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  7938. int8x8_t __ret; \
  7939. __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
  7940. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  7941. __ret; \
  7942. })
  7943. #endif
  7944. #ifdef __LITTLE_ENDIAN__
  7945. #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  7946. float32x2_t __s1 = __p1; \
  7947. float32x2_t __ret; \
  7948. __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
  7949. __ret; \
  7950. })
  7951. #else
  7952. #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  7953. float32x2_t __s1 = __p1; \
  7954. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  7955. float32x2_t __ret; \
  7956. __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
  7957. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7958. __ret; \
  7959. })
  7960. #endif
  7961. #ifdef __LITTLE_ENDIAN__
  7962. #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  7963. float16x4_t __s1 = __p1; \
  7964. float16x4_t __ret; \
  7965. __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
  7966. __ret; \
  7967. })
  7968. #else
  7969. #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  7970. float16x4_t __s1 = __p1; \
  7971. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  7972. float16x4_t __ret; \
  7973. __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
  7974. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  7975. __ret; \
  7976. })
  7977. #endif
  7978. #ifdef __LITTLE_ENDIAN__
  7979. #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  7980. int32x2_t __s1 = __p1; \
  7981. int32x2_t __ret; \
  7982. __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
  7983. __ret; \
  7984. })
  7985. #else
  7986. #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  7987. int32x2_t __s1 = __p1; \
  7988. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  7989. int32x2_t __ret; \
  7990. __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
  7991. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  7992. __ret; \
  7993. })
  7994. #endif
  7995. #ifdef __LITTLE_ENDIAN__
  7996. #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  7997. int64x1_t __s1 = __p1; \
  7998. int64x1_t __ret; \
  7999. __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
  8000. __ret; \
  8001. })
  8002. #else
  8003. #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  8004. int64x1_t __s1 = __p1; \
  8005. int64x1_t __ret; \
  8006. __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
  8007. __ret; \
  8008. })
  8009. #endif
  8010. #ifdef __LITTLE_ENDIAN__
  8011. #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  8012. int16x4_t __s1 = __p1; \
  8013. int16x4_t __ret; \
  8014. __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
  8015. __ret; \
  8016. })
  8017. #else
  8018. #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  8019. int16x4_t __s1 = __p1; \
  8020. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  8021. int16x4_t __ret; \
  8022. __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
  8023. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  8024. __ret; \
  8025. })
  8026. #endif
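/*
 * vld2/vld2q: load 2*N consecutive elements and de-interleave them, so
 * val[0] receives elements 0, 2, 4, ... and val[1] receives elements
 * 1, 3, 5, ... of the memory operand. Sketch for interleaved stereo
 * samples (illustrative names):
 *
 *   int16x4x2_t lr = vld2_s16(samples);   // lr.val[0] = left, lr.val[1] = right
 */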
  8027. #ifdef __LITTLE_ENDIAN__
  8028. #define vld2_p8(__p0) __extension__ ({ \
  8029. poly8x8x2_t __ret; \
  8030. __builtin_neon_vld2_v(&__ret, __p0, 4); \
  8031. __ret; \
  8032. })
  8033. #else
  8034. #define vld2_p8(__p0) __extension__ ({ \
  8035. poly8x8x2_t __ret; \
  8036. __builtin_neon_vld2_v(&__ret, __p0, 4); \
  8037. \
  8038. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8039. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8040. __ret; \
  8041. })
  8042. #endif
  8043. #ifdef __LITTLE_ENDIAN__
  8044. #define vld2_p16(__p0) __extension__ ({ \
  8045. poly16x4x2_t __ret; \
  8046. __builtin_neon_vld2_v(&__ret, __p0, 5); \
  8047. __ret; \
  8048. })
  8049. #else
  8050. #define vld2_p16(__p0) __extension__ ({ \
  8051. poly16x4x2_t __ret; \
  8052. __builtin_neon_vld2_v(&__ret, __p0, 5); \
  8053. \
  8054. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8055. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8056. __ret; \
  8057. })
  8058. #endif
  8059. #ifdef __LITTLE_ENDIAN__
  8060. #define vld2q_p8(__p0) __extension__ ({ \
  8061. poly8x16x2_t __ret; \
  8062. __builtin_neon_vld2q_v(&__ret, __p0, 36); \
  8063. __ret; \
  8064. })
  8065. #else
  8066. #define vld2q_p8(__p0) __extension__ ({ \
  8067. poly8x16x2_t __ret; \
  8068. __builtin_neon_vld2q_v(&__ret, __p0, 36); \
  8069. \
  8070. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8071. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8072. __ret; \
  8073. })
  8074. #endif
  8075. #ifdef __LITTLE_ENDIAN__
  8076. #define vld2q_p16(__p0) __extension__ ({ \
  8077. poly16x8x2_t __ret; \
  8078. __builtin_neon_vld2q_v(&__ret, __p0, 37); \
  8079. __ret; \
  8080. })
  8081. #else
  8082. #define vld2q_p16(__p0) __extension__ ({ \
  8083. poly16x8x2_t __ret; \
  8084. __builtin_neon_vld2q_v(&__ret, __p0, 37); \
  8085. \
  8086. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8087. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8088. __ret; \
  8089. })
  8090. #endif
  8091. #ifdef __LITTLE_ENDIAN__
  8092. #define vld2q_u8(__p0) __extension__ ({ \
  8093. uint8x16x2_t __ret; \
  8094. __builtin_neon_vld2q_v(&__ret, __p0, 48); \
  8095. __ret; \
  8096. })
  8097. #else
  8098. #define vld2q_u8(__p0) __extension__ ({ \
  8099. uint8x16x2_t __ret; \
  8100. __builtin_neon_vld2q_v(&__ret, __p0, 48); \
  8101. \
  8102. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8103. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8104. __ret; \
  8105. })
  8106. #endif
  8107. #ifdef __LITTLE_ENDIAN__
  8108. #define vld2q_u32(__p0) __extension__ ({ \
  8109. uint32x4x2_t __ret; \
  8110. __builtin_neon_vld2q_v(&__ret, __p0, 50); \
  8111. __ret; \
  8112. })
  8113. #else
  8114. #define vld2q_u32(__p0) __extension__ ({ \
  8115. uint32x4x2_t __ret; \
  8116. __builtin_neon_vld2q_v(&__ret, __p0, 50); \
  8117. \
  8118. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8119. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8120. __ret; \
  8121. })
  8122. #endif
  8123. #ifdef __LITTLE_ENDIAN__
  8124. #define vld2q_u16(__p0) __extension__ ({ \
  8125. uint16x8x2_t __ret; \
  8126. __builtin_neon_vld2q_v(&__ret, __p0, 49); \
  8127. __ret; \
  8128. })
  8129. #else
  8130. #define vld2q_u16(__p0) __extension__ ({ \
  8131. uint16x8x2_t __ret; \
  8132. __builtin_neon_vld2q_v(&__ret, __p0, 49); \
  8133. \
  8134. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8135. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8136. __ret; \
  8137. })
  8138. #endif
  8139. #ifdef __LITTLE_ENDIAN__
  8140. #define vld2q_s8(__p0) __extension__ ({ \
  8141. int8x16x2_t __ret; \
  8142. __builtin_neon_vld2q_v(&__ret, __p0, 32); \
  8143. __ret; \
  8144. })
  8145. #else
  8146. #define vld2q_s8(__p0) __extension__ ({ \
  8147. int8x16x2_t __ret; \
  8148. __builtin_neon_vld2q_v(&__ret, __p0, 32); \
  8149. \
  8150. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8151. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8152. __ret; \
  8153. })
  8154. #endif
  8155. #ifdef __LITTLE_ENDIAN__
  8156. #define vld2q_f32(__p0) __extension__ ({ \
  8157. float32x4x2_t __ret; \
  8158. __builtin_neon_vld2q_v(&__ret, __p0, 41); \
  8159. __ret; \
  8160. })
  8161. #else
  8162. #define vld2q_f32(__p0) __extension__ ({ \
  8163. float32x4x2_t __ret; \
  8164. __builtin_neon_vld2q_v(&__ret, __p0, 41); \
  8165. \
  8166. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8167. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8168. __ret; \
  8169. })
  8170. #endif
  8171. #ifdef __LITTLE_ENDIAN__
  8172. #define vld2q_f16(__p0) __extension__ ({ \
  8173. float16x8x2_t __ret; \
  8174. __builtin_neon_vld2q_v(&__ret, __p0, 40); \
  8175. __ret; \
  8176. })
  8177. #else
  8178. #define vld2q_f16(__p0) __extension__ ({ \
  8179. float16x8x2_t __ret; \
  8180. __builtin_neon_vld2q_v(&__ret, __p0, 40); \
  8181. \
  8182. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8183. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8184. __ret; \
  8185. })
  8186. #endif
  8187. #ifdef __LITTLE_ENDIAN__
  8188. #define vld2q_s32(__p0) __extension__ ({ \
  8189. int32x4x2_t __ret; \
  8190. __builtin_neon_vld2q_v(&__ret, __p0, 34); \
  8191. __ret; \
  8192. })
  8193. #else
  8194. #define vld2q_s32(__p0) __extension__ ({ \
  8195. int32x4x2_t __ret; \
  8196. __builtin_neon_vld2q_v(&__ret, __p0, 34); \
  8197. \
  8198. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8199. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8200. __ret; \
  8201. })
  8202. #endif
  8203. #ifdef __LITTLE_ENDIAN__
  8204. #define vld2q_s16(__p0) __extension__ ({ \
  8205. int16x8x2_t __ret; \
  8206. __builtin_neon_vld2q_v(&__ret, __p0, 33); \
  8207. __ret; \
  8208. })
  8209. #else
  8210. #define vld2q_s16(__p0) __extension__ ({ \
  8211. int16x8x2_t __ret; \
  8212. __builtin_neon_vld2q_v(&__ret, __p0, 33); \
  8213. \
  8214. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8215. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8216. __ret; \
  8217. })
  8218. #endif
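/* The vld2_* forms below are the 64-bit vector counterparts of the vld2q_*
 * forms above; they follow exactly the same endianness pattern. */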
  8219. #ifdef __LITTLE_ENDIAN__
  8220. #define vld2_u8(__p0) __extension__ ({ \
  8221. uint8x8x2_t __ret; \
  8222. __builtin_neon_vld2_v(&__ret, __p0, 16); \
  8223. __ret; \
  8224. })
  8225. #else
  8226. #define vld2_u8(__p0) __extension__ ({ \
  8227. uint8x8x2_t __ret; \
  8228. __builtin_neon_vld2_v(&__ret, __p0, 16); \
  8229. \
  8230. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8231. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8232. __ret; \
  8233. })
  8234. #endif
  8235. #ifdef __LITTLE_ENDIAN__
  8236. #define vld2_u32(__p0) __extension__ ({ \
  8237. uint32x2x2_t __ret; \
  8238. __builtin_neon_vld2_v(&__ret, __p0, 18); \
  8239. __ret; \
  8240. })
  8241. #else
  8242. #define vld2_u32(__p0) __extension__ ({ \
  8243. uint32x2x2_t __ret; \
  8244. __builtin_neon_vld2_v(&__ret, __p0, 18); \
  8245. \
  8246. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8247. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8248. __ret; \
  8249. })
  8250. #endif
  8251. #ifdef __LITTLE_ENDIAN__
  8252. #define vld2_u64(__p0) __extension__ ({ \
  8253. uint64x1x2_t __ret; \
  8254. __builtin_neon_vld2_v(&__ret, __p0, 19); \
  8255. __ret; \
  8256. })
  8257. #else
  8258. #define vld2_u64(__p0) __extension__ ({ \
  8259. uint64x1x2_t __ret; \
  8260. __builtin_neon_vld2_v(&__ret, __p0, 19); \
  8261. __ret; \
  8262. })
  8263. #endif
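/* The 64x1 element types have a single lane, so there is no lane order to
 * reverse and both branches of those macros are identical. */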
  8264. #ifdef __LITTLE_ENDIAN__
  8265. #define vld2_u16(__p0) __extension__ ({ \
  8266. uint16x4x2_t __ret; \
  8267. __builtin_neon_vld2_v(&__ret, __p0, 17); \
  8268. __ret; \
  8269. })
  8270. #else
  8271. #define vld2_u16(__p0) __extension__ ({ \
  8272. uint16x4x2_t __ret; \
  8273. __builtin_neon_vld2_v(&__ret, __p0, 17); \
  8274. \
  8275. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8276. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8277. __ret; \
  8278. })
  8279. #endif
  8280. #ifdef __LITTLE_ENDIAN__
  8281. #define vld2_s8(__p0) __extension__ ({ \
  8282. int8x8x2_t __ret; \
  8283. __builtin_neon_vld2_v(&__ret, __p0, 0); \
  8284. __ret; \
  8285. })
  8286. #else
  8287. #define vld2_s8(__p0) __extension__ ({ \
  8288. int8x8x2_t __ret; \
  8289. __builtin_neon_vld2_v(&__ret, __p0, 0); \
  8290. \
  8291. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8292. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8293. __ret; \
  8294. })
  8295. #endif
  8296. #ifdef __LITTLE_ENDIAN__
  8297. #define vld2_f32(__p0) __extension__ ({ \
  8298. float32x2x2_t __ret; \
  8299. __builtin_neon_vld2_v(&__ret, __p0, 9); \
  8300. __ret; \
  8301. })
  8302. #else
  8303. #define vld2_f32(__p0) __extension__ ({ \
  8304. float32x2x2_t __ret; \
  8305. __builtin_neon_vld2_v(&__ret, __p0, 9); \
  8306. \
  8307. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8308. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8309. __ret; \
  8310. })
  8311. #endif
  8312. #ifdef __LITTLE_ENDIAN__
  8313. #define vld2_f16(__p0) __extension__ ({ \
  8314. float16x4x2_t __ret; \
  8315. __builtin_neon_vld2_v(&__ret, __p0, 8); \
  8316. __ret; \
  8317. })
  8318. #else
  8319. #define vld2_f16(__p0) __extension__ ({ \
  8320. float16x4x2_t __ret; \
  8321. __builtin_neon_vld2_v(&__ret, __p0, 8); \
  8322. \
  8323. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8324. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8325. __ret; \
  8326. })
  8327. #endif
  8328. #ifdef __LITTLE_ENDIAN__
  8329. #define vld2_s32(__p0) __extension__ ({ \
  8330. int32x2x2_t __ret; \
  8331. __builtin_neon_vld2_v(&__ret, __p0, 2); \
  8332. __ret; \
  8333. })
  8334. #else
  8335. #define vld2_s32(__p0) __extension__ ({ \
  8336. int32x2x2_t __ret; \
  8337. __builtin_neon_vld2_v(&__ret, __p0, 2); \
  8338. \
  8339. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8340. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8341. __ret; \
  8342. })
  8343. #endif
  8344. #ifdef __LITTLE_ENDIAN__
  8345. #define vld2_s64(__p0) __extension__ ({ \
  8346. int64x1x2_t __ret; \
  8347. __builtin_neon_vld2_v(&__ret, __p0, 3); \
  8348. __ret; \
  8349. })
  8350. #else
  8351. #define vld2_s64(__p0) __extension__ ({ \
  8352. int64x1x2_t __ret; \
  8353. __builtin_neon_vld2_v(&__ret, __p0, 3); \
  8354. __ret; \
  8355. })
  8356. #endif
  8357. #ifdef __LITTLE_ENDIAN__
  8358. #define vld2_s16(__p0) __extension__ ({ \
  8359. int16x4x2_t __ret; \
  8360. __builtin_neon_vld2_v(&__ret, __p0, 1); \
  8361. __ret; \
  8362. })
  8363. #else
  8364. #define vld2_s16(__p0) __extension__ ({ \
  8365. int16x4x2_t __ret; \
  8366. __builtin_neon_vld2_v(&__ret, __p0, 1); \
  8367. \
  8368. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8369. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8370. __ret; \
  8371. })
  8372. #endif
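/* vld2_dup: load one 2-element structure and replicate each element across
 * all lanes of the corresponding result vector.  Illustrative (not part of
 * this header):
 *   uint32x2x2_t c = vld2_dup_u32(p);  // c.val[0] = {p[0], p[0]}, c.val[1] = {p[1], p[1]}
 */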
  8373. #ifdef __LITTLE_ENDIAN__
  8374. #define vld2_dup_p8(__p0) __extension__ ({ \
  8375. poly8x8x2_t __ret; \
  8376. __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
  8377. __ret; \
  8378. })
  8379. #else
  8380. #define vld2_dup_p8(__p0) __extension__ ({ \
  8381. poly8x8x2_t __ret; \
  8382. __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
  8383. \
  8384. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8385. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8386. __ret; \
  8387. })
  8388. #endif
  8389. #ifdef __LITTLE_ENDIAN__
  8390. #define vld2_dup_p16(__p0) __extension__ ({ \
  8391. poly16x4x2_t __ret; \
  8392. __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
  8393. __ret; \
  8394. })
  8395. #else
  8396. #define vld2_dup_p16(__p0) __extension__ ({ \
  8397. poly16x4x2_t __ret; \
  8398. __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
  8399. \
  8400. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8401. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8402. __ret; \
  8403. })
  8404. #endif
  8405. #ifdef __LITTLE_ENDIAN__
  8406. #define vld2_dup_u8(__p0) __extension__ ({ \
  8407. uint8x8x2_t __ret; \
  8408. __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
  8409. __ret; \
  8410. })
  8411. #else
  8412. #define vld2_dup_u8(__p0) __extension__ ({ \
  8413. uint8x8x2_t __ret; \
  8414. __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
  8415. \
  8416. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8417. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8418. __ret; \
  8419. })
  8420. #endif
  8421. #ifdef __LITTLE_ENDIAN__
  8422. #define vld2_dup_u32(__p0) __extension__ ({ \
  8423. uint32x2x2_t __ret; \
  8424. __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
  8425. __ret; \
  8426. })
  8427. #else
  8428. #define vld2_dup_u32(__p0) __extension__ ({ \
  8429. uint32x2x2_t __ret; \
  8430. __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
  8431. \
  8432. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8433. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8434. __ret; \
  8435. })
  8436. #endif
  8437. #ifdef __LITTLE_ENDIAN__
  8438. #define vld2_dup_u64(__p0) __extension__ ({ \
  8439. uint64x1x2_t __ret; \
  8440. __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
  8441. __ret; \
  8442. })
  8443. #else
  8444. #define vld2_dup_u64(__p0) __extension__ ({ \
  8445. uint64x1x2_t __ret; \
  8446. __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
  8447. __ret; \
  8448. })
  8449. #endif
  8450. #ifdef __LITTLE_ENDIAN__
  8451. #define vld2_dup_u16(__p0) __extension__ ({ \
  8452. uint16x4x2_t __ret; \
  8453. __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
  8454. __ret; \
  8455. })
  8456. #else
  8457. #define vld2_dup_u16(__p0) __extension__ ({ \
  8458. uint16x4x2_t __ret; \
  8459. __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
  8460. \
  8461. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8462. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8463. __ret; \
  8464. })
  8465. #endif
  8466. #ifdef __LITTLE_ENDIAN__
  8467. #define vld2_dup_s8(__p0) __extension__ ({ \
  8468. int8x8x2_t __ret; \
  8469. __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
  8470. __ret; \
  8471. })
  8472. #else
  8473. #define vld2_dup_s8(__p0) __extension__ ({ \
  8474. int8x8x2_t __ret; \
  8475. __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
  8476. \
  8477. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8478. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8479. __ret; \
  8480. })
  8481. #endif
  8482. #ifdef __LITTLE_ENDIAN__
  8483. #define vld2_dup_f32(__p0) __extension__ ({ \
  8484. float32x2x2_t __ret; \
  8485. __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
  8486. __ret; \
  8487. })
  8488. #else
  8489. #define vld2_dup_f32(__p0) __extension__ ({ \
  8490. float32x2x2_t __ret; \
  8491. __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
  8492. \
  8493. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8494. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8495. __ret; \
  8496. })
  8497. #endif
  8498. #ifdef __LITTLE_ENDIAN__
  8499. #define vld2_dup_f16(__p0) __extension__ ({ \
  8500. float16x4x2_t __ret; \
  8501. __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
  8502. __ret; \
  8503. })
  8504. #else
  8505. #define vld2_dup_f16(__p0) __extension__ ({ \
  8506. float16x4x2_t __ret; \
  8507. __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
  8508. \
  8509. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8510. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8511. __ret; \
  8512. })
  8513. #endif
  8514. #ifdef __LITTLE_ENDIAN__
  8515. #define vld2_dup_s32(__p0) __extension__ ({ \
  8516. int32x2x2_t __ret; \
  8517. __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
  8518. __ret; \
  8519. })
  8520. #else
  8521. #define vld2_dup_s32(__p0) __extension__ ({ \
  8522. int32x2x2_t __ret; \
  8523. __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
  8524. \
  8525. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8526. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8527. __ret; \
  8528. })
  8529. #endif
  8530. #ifdef __LITTLE_ENDIAN__
  8531. #define vld2_dup_s64(__p0) __extension__ ({ \
  8532. int64x1x2_t __ret; \
  8533. __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
  8534. __ret; \
  8535. })
  8536. #else
  8537. #define vld2_dup_s64(__p0) __extension__ ({ \
  8538. int64x1x2_t __ret; \
  8539. __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
  8540. __ret; \
  8541. })
  8542. #endif
  8543. #ifdef __LITTLE_ENDIAN__
  8544. #define vld2_dup_s16(__p0) __extension__ ({ \
  8545. int16x4x2_t __ret; \
  8546. __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
  8547. __ret; \
  8548. })
  8549. #else
  8550. #define vld2_dup_s16(__p0) __extension__ ({ \
  8551. int16x4x2_t __ret; \
  8552. __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
  8553. \
  8554. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8555. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8556. __ret; \
  8557. })
  8558. #endif
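/* vld2_lane: load one 2-element structure into lane __p2 (a constant lane
 * index) of the two vectors passed in __p1, leaving the other lanes
 * unchanged.  In the big-endian variants the incoming vectors are reversed
 * into __rev1 before the builtin call and the result is reversed back
 * afterwards.  Illustrative (not part of this header):
 *   acc = vld2_lane_u16(p, acc, 3);  // acc.val[0][3] = p[0], acc.val[1][3] = p[1]
 */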
  8559. #ifdef __LITTLE_ENDIAN__
  8560. #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  8561. poly8x8x2_t __s1 = __p1; \
  8562. poly8x8x2_t __ret; \
  8563. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
  8564. __ret; \
  8565. })
  8566. #else
  8567. #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  8568. poly8x8x2_t __s1 = __p1; \
  8569. poly8x8x2_t __rev1; \
  8570. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8571. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8572. poly8x8x2_t __ret; \
  8573. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
  8574. \
  8575. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8576. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8577. __ret; \
  8578. })
  8579. #endif
  8580. #ifdef __LITTLE_ENDIAN__
  8581. #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  8582. poly16x4x2_t __s1 = __p1; \
  8583. poly16x4x2_t __ret; \
  8584. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
  8585. __ret; \
  8586. })
  8587. #else
  8588. #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  8589. poly16x4x2_t __s1 = __p1; \
  8590. poly16x4x2_t __rev1; \
  8591. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  8592. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  8593. poly16x4x2_t __ret; \
  8594. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
  8595. \
  8596. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8597. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8598. __ret; \
  8599. })
  8600. #endif
  8601. #ifdef __LITTLE_ENDIAN__
  8602. #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  8603. poly16x8x2_t __s1 = __p1; \
  8604. poly16x8x2_t __ret; \
  8605. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
  8606. __ret; \
  8607. })
  8608. #else
  8609. #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  8610. poly16x8x2_t __s1 = __p1; \
  8611. poly16x8x2_t __rev1; \
  8612. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8613. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8614. poly16x8x2_t __ret; \
  8615. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
  8616. \
  8617. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8618. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8619. __ret; \
  8620. })
  8621. #endif
  8622. #ifdef __LITTLE_ENDIAN__
  8623. #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  8624. uint32x4x2_t __s1 = __p1; \
  8625. uint32x4x2_t __ret; \
  8626. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
  8627. __ret; \
  8628. })
  8629. #else
  8630. #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  8631. uint32x4x2_t __s1 = __p1; \
  8632. uint32x4x2_t __rev1; \
  8633. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  8634. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  8635. uint32x4x2_t __ret; \
  8636. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
  8637. \
  8638. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8639. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8640. __ret; \
  8641. })
  8642. #endif
  8643. #ifdef __LITTLE_ENDIAN__
  8644. #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  8645. uint16x8x2_t __s1 = __p1; \
  8646. uint16x8x2_t __ret; \
  8647. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
  8648. __ret; \
  8649. })
  8650. #else
  8651. #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  8652. uint16x8x2_t __s1 = __p1; \
  8653. uint16x8x2_t __rev1; \
  8654. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8655. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8656. uint16x8x2_t __ret; \
  8657. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
  8658. \
  8659. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8660. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8661. __ret; \
  8662. })
  8663. #endif
  8664. #ifdef __LITTLE_ENDIAN__
  8665. #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  8666. float32x4x2_t __s1 = __p1; \
  8667. float32x4x2_t __ret; \
  8668. __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
  8669. __ret; \
  8670. })
  8671. #else
  8672. #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  8673. float32x4x2_t __s1 = __p1; \
  8674. float32x4x2_t __rev1; \
  8675. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  8676. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  8677. float32x4x2_t __ret; \
  8678. __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
  8679. \
  8680. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8681. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8682. __ret; \
  8683. })
  8684. #endif
  8685. #ifdef __LITTLE_ENDIAN__
  8686. #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  8687. float16x8x2_t __s1 = __p1; \
  8688. float16x8x2_t __ret; \
  8689. __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
  8690. __ret; \
  8691. })
  8692. #else
  8693. #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  8694. float16x8x2_t __s1 = __p1; \
  8695. float16x8x2_t __rev1; \
  8696. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8697. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8698. float16x8x2_t __ret; \
  8699. __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
  8700. \
  8701. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8702. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8703. __ret; \
  8704. })
  8705. #endif
  8706. #ifdef __LITTLE_ENDIAN__
  8707. #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  8708. int32x4x2_t __s1 = __p1; \
  8709. int32x4x2_t __ret; \
  8710. __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
  8711. __ret; \
  8712. })
  8713. #else
  8714. #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  8715. int32x4x2_t __s1 = __p1; \
  8716. int32x4x2_t __rev1; \
  8717. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  8718. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  8719. int32x4x2_t __ret; \
  8720. __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
  8721. \
  8722. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8723. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8724. __ret; \
  8725. })
  8726. #endif
  8727. #ifdef __LITTLE_ENDIAN__
  8728. #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  8729. int16x8x2_t __s1 = __p1; \
  8730. int16x8x2_t __ret; \
  8731. __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
  8732. __ret; \
  8733. })
  8734. #else
  8735. #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  8736. int16x8x2_t __s1 = __p1; \
  8737. int16x8x2_t __rev1; \
  8738. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8739. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8740. int16x8x2_t __ret; \
  8741. __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
  8742. \
  8743. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8744. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8745. __ret; \
  8746. })
  8747. #endif
  8748. #ifdef __LITTLE_ENDIAN__
  8749. #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  8750. uint8x8x2_t __s1 = __p1; \
  8751. uint8x8x2_t __ret; \
  8752. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
  8753. __ret; \
  8754. })
  8755. #else
  8756. #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  8757. uint8x8x2_t __s1 = __p1; \
  8758. uint8x8x2_t __rev1; \
  8759. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8760. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8761. uint8x8x2_t __ret; \
  8762. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
  8763. \
  8764. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8765. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8766. __ret; \
  8767. })
  8768. #endif
  8769. #ifdef __LITTLE_ENDIAN__
  8770. #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  8771. uint32x2x2_t __s1 = __p1; \
  8772. uint32x2x2_t __ret; \
  8773. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
  8774. __ret; \
  8775. })
  8776. #else
  8777. #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  8778. uint32x2x2_t __s1 = __p1; \
  8779. uint32x2x2_t __rev1; \
  8780. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  8781. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  8782. uint32x2x2_t __ret; \
  8783. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
  8784. \
  8785. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8786. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8787. __ret; \
  8788. })
  8789. #endif
  8790. #ifdef __LITTLE_ENDIAN__
  8791. #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  8792. uint16x4x2_t __s1 = __p1; \
  8793. uint16x4x2_t __ret; \
  8794. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
  8795. __ret; \
  8796. })
  8797. #else
  8798. #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  8799. uint16x4x2_t __s1 = __p1; \
  8800. uint16x4x2_t __rev1; \
  8801. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  8802. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  8803. uint16x4x2_t __ret; \
  8804. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
  8805. \
  8806. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8807. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8808. __ret; \
  8809. })
  8810. #endif
  8811. #ifdef __LITTLE_ENDIAN__
  8812. #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  8813. int8x8x2_t __s1 = __p1; \
  8814. int8x8x2_t __ret; \
  8815. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
  8816. __ret; \
  8817. })
  8818. #else
  8819. #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  8820. int8x8x2_t __s1 = __p1; \
  8821. int8x8x2_t __rev1; \
  8822. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8823. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8824. int8x8x2_t __ret; \
  8825. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
  8826. \
  8827. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8828. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8829. __ret; \
  8830. })
  8831. #endif
  8832. #ifdef __LITTLE_ENDIAN__
  8833. #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  8834. float32x2x2_t __s1 = __p1; \
  8835. float32x2x2_t __ret; \
  8836. __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
  8837. __ret; \
  8838. })
  8839. #else
  8840. #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  8841. float32x2x2_t __s1 = __p1; \
  8842. float32x2x2_t __rev1; \
  8843. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  8844. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  8845. float32x2x2_t __ret; \
  8846. __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
  8847. \
  8848. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8849. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8850. __ret; \
  8851. })
  8852. #endif
  8853. #ifdef __LITTLE_ENDIAN__
  8854. #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  8855. float16x4x2_t __s1 = __p1; \
  8856. float16x4x2_t __ret; \
  8857. __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
  8858. __ret; \
  8859. })
  8860. #else
  8861. #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  8862. float16x4x2_t __s1 = __p1; \
  8863. float16x4x2_t __rev1; \
  8864. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  8865. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  8866. float16x4x2_t __ret; \
  8867. __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
  8868. \
  8869. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8870. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8871. __ret; \
  8872. })
  8873. #endif
  8874. #ifdef __LITTLE_ENDIAN__
  8875. #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  8876. int32x2x2_t __s1 = __p1; \
  8877. int32x2x2_t __ret; \
  8878. __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
  8879. __ret; \
  8880. })
  8881. #else
  8882. #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  8883. int32x2x2_t __s1 = __p1; \
  8884. int32x2x2_t __rev1; \
  8885. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  8886. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  8887. int32x2x2_t __ret; \
  8888. __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
  8889. \
  8890. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  8891. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  8892. __ret; \
  8893. })
  8894. #endif
  8895. #ifdef __LITTLE_ENDIAN__
  8896. #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  8897. int16x4x2_t __s1 = __p1; \
  8898. int16x4x2_t __ret; \
  8899. __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
  8900. __ret; \
  8901. })
  8902. #else
  8903. #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  8904. int16x4x2_t __s1 = __p1; \
  8905. int16x4x2_t __rev1; \
  8906. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  8907. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  8908. int16x4x2_t __ret; \
  8909. __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
  8910. \
  8911. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8912. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8913. __ret; \
  8914. })
  8915. #endif
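/* vld3/vld3q: de-interleave 3-element structures from memory into the three
 * vectors of an *x3_t.  Illustrative (not part of this header): split packed
 * RGB bytes into planes.
 *   uint8x16x3_t rgb = vld3q_u8(pixels);  // rgb.val[0] = R, rgb.val[1] = G, rgb.val[2] = B
 */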
  8916. #ifdef __LITTLE_ENDIAN__
  8917. #define vld3_p8(__p0) __extension__ ({ \
  8918. poly8x8x3_t __ret; \
  8919. __builtin_neon_vld3_v(&__ret, __p0, 4); \
  8920. __ret; \
  8921. })
  8922. #else
  8923. #define vld3_p8(__p0) __extension__ ({ \
  8924. poly8x8x3_t __ret; \
  8925. __builtin_neon_vld3_v(&__ret, __p0, 4); \
  8926. \
  8927. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8928. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8929. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  8930. __ret; \
  8931. })
  8932. #endif
  8933. #ifdef __LITTLE_ENDIAN__
  8934. #define vld3_p16(__p0) __extension__ ({ \
  8935. poly16x4x3_t __ret; \
  8936. __builtin_neon_vld3_v(&__ret, __p0, 5); \
  8937. __ret; \
  8938. })
  8939. #else
  8940. #define vld3_p16(__p0) __extension__ ({ \
  8941. poly16x4x3_t __ret; \
  8942. __builtin_neon_vld3_v(&__ret, __p0, 5); \
  8943. \
  8944. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  8945. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  8946. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  8947. __ret; \
  8948. })
  8949. #endif
  8950. #ifdef __LITTLE_ENDIAN__
  8951. #define vld3q_p8(__p0) __extension__ ({ \
  8952. poly8x16x3_t __ret; \
  8953. __builtin_neon_vld3q_v(&__ret, __p0, 36); \
  8954. __ret; \
  8955. })
  8956. #else
  8957. #define vld3q_p8(__p0) __extension__ ({ \
  8958. poly8x16x3_t __ret; \
  8959. __builtin_neon_vld3q_v(&__ret, __p0, 36); \
  8960. \
  8961. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8962. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8963. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8964. __ret; \
  8965. })
  8966. #endif
  8967. #ifdef __LITTLE_ENDIAN__
  8968. #define vld3q_p16(__p0) __extension__ ({ \
  8969. poly16x8x3_t __ret; \
  8970. __builtin_neon_vld3q_v(&__ret, __p0, 37); \
  8971. __ret; \
  8972. })
  8973. #else
  8974. #define vld3q_p16(__p0) __extension__ ({ \
  8975. poly16x8x3_t __ret; \
  8976. __builtin_neon_vld3q_v(&__ret, __p0, 37); \
  8977. \
  8978. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  8979. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  8980. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  8981. __ret; \
  8982. })
  8983. #endif
  8984. #ifdef __LITTLE_ENDIAN__
  8985. #define vld3q_u8(__p0) __extension__ ({ \
  8986. uint8x16x3_t __ret; \
  8987. __builtin_neon_vld3q_v(&__ret, __p0, 48); \
  8988. __ret; \
  8989. })
  8990. #else
  8991. #define vld3q_u8(__p0) __extension__ ({ \
  8992. uint8x16x3_t __ret; \
  8993. __builtin_neon_vld3q_v(&__ret, __p0, 48); \
  8994. \
  8995. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8996. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8997. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  8998. __ret; \
  8999. })
  9000. #endif
  9001. #ifdef __LITTLE_ENDIAN__
  9002. #define vld3q_u32(__p0) __extension__ ({ \
  9003. uint32x4x3_t __ret; \
  9004. __builtin_neon_vld3q_v(&__ret, __p0, 50); \
  9005. __ret; \
  9006. })
  9007. #else
  9008. #define vld3q_u32(__p0) __extension__ ({ \
  9009. uint32x4x3_t __ret; \
  9010. __builtin_neon_vld3q_v(&__ret, __p0, 50); \
  9011. \
  9012. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9013. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9014. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9015. __ret; \
  9016. })
  9017. #endif
  9018. #ifdef __LITTLE_ENDIAN__
  9019. #define vld3q_u16(__p0) __extension__ ({ \
  9020. uint16x8x3_t __ret; \
  9021. __builtin_neon_vld3q_v(&__ret, __p0, 49); \
  9022. __ret; \
  9023. })
  9024. #else
  9025. #define vld3q_u16(__p0) __extension__ ({ \
  9026. uint16x8x3_t __ret; \
  9027. __builtin_neon_vld3q_v(&__ret, __p0, 49); \
  9028. \
  9029. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9030. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9031. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9032. __ret; \
  9033. })
  9034. #endif
  9035. #ifdef __LITTLE_ENDIAN__
  9036. #define vld3q_s8(__p0) __extension__ ({ \
  9037. int8x16x3_t __ret; \
  9038. __builtin_neon_vld3q_v(&__ret, __p0, 32); \
  9039. __ret; \
  9040. })
  9041. #else
  9042. #define vld3q_s8(__p0) __extension__ ({ \
  9043. int8x16x3_t __ret; \
  9044. __builtin_neon_vld3q_v(&__ret, __p0, 32); \
  9045. \
  9046. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9047. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9048. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9049. __ret; \
  9050. })
  9051. #endif
  9052. #ifdef __LITTLE_ENDIAN__
  9053. #define vld3q_f32(__p0) __extension__ ({ \
  9054. float32x4x3_t __ret; \
  9055. __builtin_neon_vld3q_v(&__ret, __p0, 41); \
  9056. __ret; \
  9057. })
  9058. #else
  9059. #define vld3q_f32(__p0) __extension__ ({ \
  9060. float32x4x3_t __ret; \
  9061. __builtin_neon_vld3q_v(&__ret, __p0, 41); \
  9062. \
  9063. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9064. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9065. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9066. __ret; \
  9067. })
  9068. #endif
  9069. #ifdef __LITTLE_ENDIAN__
  9070. #define vld3q_f16(__p0) __extension__ ({ \
  9071. float16x8x3_t __ret; \
  9072. __builtin_neon_vld3q_v(&__ret, __p0, 40); \
  9073. __ret; \
  9074. })
  9075. #else
  9076. #define vld3q_f16(__p0) __extension__ ({ \
  9077. float16x8x3_t __ret; \
  9078. __builtin_neon_vld3q_v(&__ret, __p0, 40); \
  9079. \
  9080. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9081. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9082. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9083. __ret; \
  9084. })
  9085. #endif
  9086. #ifdef __LITTLE_ENDIAN__
  9087. #define vld3q_s32(__p0) __extension__ ({ \
  9088. int32x4x3_t __ret; \
  9089. __builtin_neon_vld3q_v(&__ret, __p0, 34); \
  9090. __ret; \
  9091. })
  9092. #else
  9093. #define vld3q_s32(__p0) __extension__ ({ \
  9094. int32x4x3_t __ret; \
  9095. __builtin_neon_vld3q_v(&__ret, __p0, 34); \
  9096. \
  9097. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9098. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9099. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9100. __ret; \
  9101. })
  9102. #endif
  9103. #ifdef __LITTLE_ENDIAN__
  9104. #define vld3q_s16(__p0) __extension__ ({ \
  9105. int16x8x3_t __ret; \
  9106. __builtin_neon_vld3q_v(&__ret, __p0, 33); \
  9107. __ret; \
  9108. })
  9109. #else
  9110. #define vld3q_s16(__p0) __extension__ ({ \
  9111. int16x8x3_t __ret; \
  9112. __builtin_neon_vld3q_v(&__ret, __p0, 33); \
  9113. \
  9114. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9115. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9116. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9117. __ret; \
  9118. })
  9119. #endif
  9120. #ifdef __LITTLE_ENDIAN__
  9121. #define vld3_u8(__p0) __extension__ ({ \
  9122. uint8x8x3_t __ret; \
  9123. __builtin_neon_vld3_v(&__ret, __p0, 16); \
  9124. __ret; \
  9125. })
  9126. #else
  9127. #define vld3_u8(__p0) __extension__ ({ \
  9128. uint8x8x3_t __ret; \
  9129. __builtin_neon_vld3_v(&__ret, __p0, 16); \
  9130. \
  9131. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9132. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9133. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9134. __ret; \
  9135. })
  9136. #endif
  9137. #ifdef __LITTLE_ENDIAN__
  9138. #define vld3_u32(__p0) __extension__ ({ \
  9139. uint32x2x3_t __ret; \
  9140. __builtin_neon_vld3_v(&__ret, __p0, 18); \
  9141. __ret; \
  9142. })
  9143. #else
  9144. #define vld3_u32(__p0) __extension__ ({ \
  9145. uint32x2x3_t __ret; \
  9146. __builtin_neon_vld3_v(&__ret, __p0, 18); \
  9147. \
  9148. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9149. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9150. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9151. __ret; \
  9152. })
  9153. #endif
  9154. #ifdef __LITTLE_ENDIAN__
  9155. #define vld3_u64(__p0) __extension__ ({ \
  9156. uint64x1x3_t __ret; \
  9157. __builtin_neon_vld3_v(&__ret, __p0, 19); \
  9158. __ret; \
  9159. })
  9160. #else
  9161. #define vld3_u64(__p0) __extension__ ({ \
  9162. uint64x1x3_t __ret; \
  9163. __builtin_neon_vld3_v(&__ret, __p0, 19); \
  9164. __ret; \
  9165. })
  9166. #endif
  9167. #ifdef __LITTLE_ENDIAN__
  9168. #define vld3_u16(__p0) __extension__ ({ \
  9169. uint16x4x3_t __ret; \
  9170. __builtin_neon_vld3_v(&__ret, __p0, 17); \
  9171. __ret; \
  9172. })
  9173. #else
  9174. #define vld3_u16(__p0) __extension__ ({ \
  9175. uint16x4x3_t __ret; \
  9176. __builtin_neon_vld3_v(&__ret, __p0, 17); \
  9177. \
  9178. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9179. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9180. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9181. __ret; \
  9182. })
  9183. #endif
  9184. #ifdef __LITTLE_ENDIAN__
  9185. #define vld3_s8(__p0) __extension__ ({ \
  9186. int8x8x3_t __ret; \
  9187. __builtin_neon_vld3_v(&__ret, __p0, 0); \
  9188. __ret; \
  9189. })
  9190. #else
  9191. #define vld3_s8(__p0) __extension__ ({ \
  9192. int8x8x3_t __ret; \
  9193. __builtin_neon_vld3_v(&__ret, __p0, 0); \
  9194. \
  9195. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9196. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9197. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9198. __ret; \
  9199. })
  9200. #endif
  9201. #ifdef __LITTLE_ENDIAN__
  9202. #define vld3_f32(__p0) __extension__ ({ \
  9203. float32x2x3_t __ret; \
  9204. __builtin_neon_vld3_v(&__ret, __p0, 9); \
  9205. __ret; \
  9206. })
  9207. #else
  9208. #define vld3_f32(__p0) __extension__ ({ \
  9209. float32x2x3_t __ret; \
  9210. __builtin_neon_vld3_v(&__ret, __p0, 9); \
  9211. \
  9212. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9213. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9214. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9215. __ret; \
  9216. })
  9217. #endif
  9218. #ifdef __LITTLE_ENDIAN__
  9219. #define vld3_f16(__p0) __extension__ ({ \
  9220. float16x4x3_t __ret; \
  9221. __builtin_neon_vld3_v(&__ret, __p0, 8); \
  9222. __ret; \
  9223. })
  9224. #else
  9225. #define vld3_f16(__p0) __extension__ ({ \
  9226. float16x4x3_t __ret; \
  9227. __builtin_neon_vld3_v(&__ret, __p0, 8); \
  9228. \
  9229. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9230. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9231. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9232. __ret; \
  9233. })
  9234. #endif
  9235. #ifdef __LITTLE_ENDIAN__
  9236. #define vld3_s32(__p0) __extension__ ({ \
  9237. int32x2x3_t __ret; \
  9238. __builtin_neon_vld3_v(&__ret, __p0, 2); \
  9239. __ret; \
  9240. })
  9241. #else
  9242. #define vld3_s32(__p0) __extension__ ({ \
  9243. int32x2x3_t __ret; \
  9244. __builtin_neon_vld3_v(&__ret, __p0, 2); \
  9245. \
  9246. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9247. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9248. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9249. __ret; \
  9250. })
  9251. #endif
  9252. #ifdef __LITTLE_ENDIAN__
  9253. #define vld3_s64(__p0) __extension__ ({ \
  9254. int64x1x3_t __ret; \
  9255. __builtin_neon_vld3_v(&__ret, __p0, 3); \
  9256. __ret; \
  9257. })
  9258. #else
  9259. #define vld3_s64(__p0) __extension__ ({ \
  9260. int64x1x3_t __ret; \
  9261. __builtin_neon_vld3_v(&__ret, __p0, 3); \
  9262. __ret; \
  9263. })
  9264. #endif
  9265. #ifdef __LITTLE_ENDIAN__
  9266. #define vld3_s16(__p0) __extension__ ({ \
  9267. int16x4x3_t __ret; \
  9268. __builtin_neon_vld3_v(&__ret, __p0, 1); \
  9269. __ret; \
  9270. })
  9271. #else
  9272. #define vld3_s16(__p0) __extension__ ({ \
  9273. int16x4x3_t __ret; \
  9274. __builtin_neon_vld3_v(&__ret, __p0, 1); \
  9275. \
  9276. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9277. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9278. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9279. __ret; \
  9280. })
  9281. #endif
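/* vld3_dup: load one 3-element structure and replicate each element across
 * all lanes of the corresponding result vector.  Illustrative (not part of
 * this header), with xyz pointing at three consecutive floats:
 *   float32x2x3_t v = vld3_dup_f32(xyz);  // v.val[0] = {x,x}, v.val[1] = {y,y}, v.val[2] = {z,z}
 */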
  9282. #ifdef __LITTLE_ENDIAN__
  9283. #define vld3_dup_p8(__p0) __extension__ ({ \
  9284. poly8x8x3_t __ret; \
  9285. __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
  9286. __ret; \
  9287. })
  9288. #else
  9289. #define vld3_dup_p8(__p0) __extension__ ({ \
  9290. poly8x8x3_t __ret; \
  9291. __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
  9292. \
  9293. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9294. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9295. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9296. __ret; \
  9297. })
  9298. #endif
  9299. #ifdef __LITTLE_ENDIAN__
  9300. #define vld3_dup_p16(__p0) __extension__ ({ \
  9301. poly16x4x3_t __ret; \
  9302. __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
  9303. __ret; \
  9304. })
  9305. #else
  9306. #define vld3_dup_p16(__p0) __extension__ ({ \
  9307. poly16x4x3_t __ret; \
  9308. __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
  9309. \
  9310. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9311. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9312. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9313. __ret; \
  9314. })
  9315. #endif
  9316. #ifdef __LITTLE_ENDIAN__
  9317. #define vld3_dup_u8(__p0) __extension__ ({ \
  9318. uint8x8x3_t __ret; \
  9319. __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
  9320. __ret; \
  9321. })
  9322. #else
  9323. #define vld3_dup_u8(__p0) __extension__ ({ \
  9324. uint8x8x3_t __ret; \
  9325. __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
  9326. \
  9327. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9328. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9329. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9330. __ret; \
  9331. })
  9332. #endif
  9333. #ifdef __LITTLE_ENDIAN__
  9334. #define vld3_dup_u32(__p0) __extension__ ({ \
  9335. uint32x2x3_t __ret; \
  9336. __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
  9337. __ret; \
  9338. })
  9339. #else
  9340. #define vld3_dup_u32(__p0) __extension__ ({ \
  9341. uint32x2x3_t __ret; \
  9342. __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
  9343. \
  9344. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9345. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9346. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9347. __ret; \
  9348. })
  9349. #endif
  9350. #ifdef __LITTLE_ENDIAN__
  9351. #define vld3_dup_u64(__p0) __extension__ ({ \
  9352. uint64x1x3_t __ret; \
  9353. __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
  9354. __ret; \
  9355. })
  9356. #else
  9357. #define vld3_dup_u64(__p0) __extension__ ({ \
  9358. uint64x1x3_t __ret; \
  9359. __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
  9360. __ret; \
  9361. })
  9362. #endif
  9363. #ifdef __LITTLE_ENDIAN__
  9364. #define vld3_dup_u16(__p0) __extension__ ({ \
  9365. uint16x4x3_t __ret; \
  9366. __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
  9367. __ret; \
  9368. })
  9369. #else
  9370. #define vld3_dup_u16(__p0) __extension__ ({ \
  9371. uint16x4x3_t __ret; \
  9372. __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
  9373. \
  9374. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9375. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9376. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9377. __ret; \
  9378. })
  9379. #endif
  9380. #ifdef __LITTLE_ENDIAN__
  9381. #define vld3_dup_s8(__p0) __extension__ ({ \
  9382. int8x8x3_t __ret; \
  9383. __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
  9384. __ret; \
  9385. })
  9386. #else
  9387. #define vld3_dup_s8(__p0) __extension__ ({ \
  9388. int8x8x3_t __ret; \
  9389. __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
  9390. \
  9391. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9392. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9393. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9394. __ret; \
  9395. })
  9396. #endif
  9397. #ifdef __LITTLE_ENDIAN__
  9398. #define vld3_dup_f32(__p0) __extension__ ({ \
  9399. float32x2x3_t __ret; \
  9400. __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
  9401. __ret; \
  9402. })
  9403. #else
  9404. #define vld3_dup_f32(__p0) __extension__ ({ \
  9405. float32x2x3_t __ret; \
  9406. __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
  9407. \
  9408. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9409. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9410. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9411. __ret; \
  9412. })
  9413. #endif
  9414. #ifdef __LITTLE_ENDIAN__
  9415. #define vld3_dup_f16(__p0) __extension__ ({ \
  9416. float16x4x3_t __ret; \
  9417. __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
  9418. __ret; \
  9419. })
  9420. #else
  9421. #define vld3_dup_f16(__p0) __extension__ ({ \
  9422. float16x4x3_t __ret; \
  9423. __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
  9424. \
  9425. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9426. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9427. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9428. __ret; \
  9429. })
  9430. #endif
  9431. #ifdef __LITTLE_ENDIAN__
  9432. #define vld3_dup_s32(__p0) __extension__ ({ \
  9433. int32x2x3_t __ret; \
  9434. __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
  9435. __ret; \
  9436. })
  9437. #else
  9438. #define vld3_dup_s32(__p0) __extension__ ({ \
  9439. int32x2x3_t __ret; \
  9440. __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
  9441. \
  9442. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9443. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9444. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9445. __ret; \
  9446. })
  9447. #endif
  9448. #ifdef __LITTLE_ENDIAN__
  9449. #define vld3_dup_s64(__p0) __extension__ ({ \
  9450. int64x1x3_t __ret; \
  9451. __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
  9452. __ret; \
  9453. })
  9454. #else
  9455. #define vld3_dup_s64(__p0) __extension__ ({ \
  9456. int64x1x3_t __ret; \
  9457. __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
  9458. __ret; \
  9459. })
  9460. #endif
  9461. #ifdef __LITTLE_ENDIAN__
  9462. #define vld3_dup_s16(__p0) __extension__ ({ \
  9463. int16x4x3_t __ret; \
  9464. __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
  9465. __ret; \
  9466. })
  9467. #else
  9468. #define vld3_dup_s16(__p0) __extension__ ({ \
  9469. int16x4x3_t __ret; \
  9470. __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
  9471. \
  9472. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9473. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9474. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9475. __ret; \
  9476. })
  9477. #endif
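/* vld3_lane: load one 3-element structure into lane __p2 (a constant lane
 * index) of the three vectors passed in __p1; the other lanes are preserved.
 * Illustrative (not part of this header):
 *   px = vld3_lane_u8(p, px, 5);  // px.val[k][5] = p[k] for k = 0, 1, 2
 */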
  9478. #ifdef __LITTLE_ENDIAN__
  9479. #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  9480. poly8x8x3_t __s1 = __p1; \
  9481. poly8x8x3_t __ret; \
  9482. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
  9483. __ret; \
  9484. })
  9485. #else
  9486. #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  9487. poly8x8x3_t __s1 = __p1; \
  9488. poly8x8x3_t __rev1; \
  9489. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9490. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9491. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9492. poly8x8x3_t __ret; \
  9493. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
  9494. \
  9495. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9496. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9497. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9498. __ret; \
  9499. })
  9500. #endif
  9501. #ifdef __LITTLE_ENDIAN__
  9502. #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  9503. poly16x4x3_t __s1 = __p1; \
  9504. poly16x4x3_t __ret; \
  9505. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
  9506. __ret; \
  9507. })
  9508. #else
  9509. #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  9510. poly16x4x3_t __s1 = __p1; \
  9511. poly16x4x3_t __rev1; \
  9512. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  9513. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  9514. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  9515. poly16x4x3_t __ret; \
  9516. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
  9517. \
  9518. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9519. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9520. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9521. __ret; \
  9522. })
  9523. #endif
  9524. #ifdef __LITTLE_ENDIAN__
  9525. #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  9526. poly16x8x3_t __s1 = __p1; \
  9527. poly16x8x3_t __ret; \
  9528. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
  9529. __ret; \
  9530. })
  9531. #else
  9532. #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  9533. poly16x8x3_t __s1 = __p1; \
  9534. poly16x8x3_t __rev1; \
  9535. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9536. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9537. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9538. poly16x8x3_t __ret; \
  9539. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
  9540. \
  9541. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9542. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9543. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9544. __ret; \
  9545. })
  9546. #endif
  9547. #ifdef __LITTLE_ENDIAN__
  9548. #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  9549. uint32x4x3_t __s1 = __p1; \
  9550. uint32x4x3_t __ret; \
  9551. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
  9552. __ret; \
  9553. })
  9554. #else
  9555. #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  9556. uint32x4x3_t __s1 = __p1; \
  9557. uint32x4x3_t __rev1; \
  9558. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  9559. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  9560. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  9561. uint32x4x3_t __ret; \
  9562. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
  9563. \
  9564. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9565. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9566. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9567. __ret; \
  9568. })
  9569. #endif
  9570. #ifdef __LITTLE_ENDIAN__
  9571. #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  9572. uint16x8x3_t __s1 = __p1; \
  9573. uint16x8x3_t __ret; \
  9574. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
  9575. __ret; \
  9576. })
  9577. #else
  9578. #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  9579. uint16x8x3_t __s1 = __p1; \
  9580. uint16x8x3_t __rev1; \
  9581. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9582. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9583. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9584. uint16x8x3_t __ret; \
  9585. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
  9586. \
  9587. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9588. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9589. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9590. __ret; \
  9591. })
  9592. #endif
  9593. #ifdef __LITTLE_ENDIAN__
  9594. #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  9595. float32x4x3_t __s1 = __p1; \
  9596. float32x4x3_t __ret; \
  9597. __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
  9598. __ret; \
  9599. })
  9600. #else
  9601. #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  9602. float32x4x3_t __s1 = __p1; \
  9603. float32x4x3_t __rev1; \
  9604. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  9605. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  9606. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  9607. float32x4x3_t __ret; \
  9608. __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
  9609. \
  9610. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9611. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9612. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9613. __ret; \
  9614. })
  9615. #endif
  9616. #ifdef __LITTLE_ENDIAN__
  9617. #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  9618. float16x8x3_t __s1 = __p1; \
  9619. float16x8x3_t __ret; \
  9620. __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
  9621. __ret; \
  9622. })
  9623. #else
  9624. #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  9625. float16x8x3_t __s1 = __p1; \
  9626. float16x8x3_t __rev1; \
  9627. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9628. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9629. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9630. float16x8x3_t __ret; \
  9631. __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
  9632. \
  9633. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9634. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9635. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9636. __ret; \
  9637. })
  9638. #endif
  9639. #ifdef __LITTLE_ENDIAN__
  9640. #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  9641. int32x4x3_t __s1 = __p1; \
  9642. int32x4x3_t __ret; \
  9643. __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
  9644. __ret; \
  9645. })
  9646. #else
  9647. #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  9648. int32x4x3_t __s1 = __p1; \
  9649. int32x4x3_t __rev1; \
  9650. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  9651. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  9652. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  9653. int32x4x3_t __ret; \
  9654. __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
  9655. \
  9656. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9657. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9658. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9659. __ret; \
  9660. })
  9661. #endif
  9662. #ifdef __LITTLE_ENDIAN__
  9663. #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  9664. int16x8x3_t __s1 = __p1; \
  9665. int16x8x3_t __ret; \
  9666. __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
  9667. __ret; \
  9668. })
  9669. #else
  9670. #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  9671. int16x8x3_t __s1 = __p1; \
  9672. int16x8x3_t __rev1; \
  9673. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9674. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9675. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9676. int16x8x3_t __ret; \
  9677. __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
  9678. \
  9679. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9680. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9681. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9682. __ret; \
  9683. })
  9684. #endif
  9685. #ifdef __LITTLE_ENDIAN__
  9686. #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  9687. uint8x8x3_t __s1 = __p1; \
  9688. uint8x8x3_t __ret; \
  9689. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
  9690. __ret; \
  9691. })
  9692. #else
  9693. #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  9694. uint8x8x3_t __s1 = __p1; \
  9695. uint8x8x3_t __rev1; \
  9696. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9697. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9698. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9699. uint8x8x3_t __ret; \
  9700. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
  9701. \
  9702. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9703. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9704. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9705. __ret; \
  9706. })
  9707. #endif
  9708. #ifdef __LITTLE_ENDIAN__
  9709. #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  9710. uint32x2x3_t __s1 = __p1; \
  9711. uint32x2x3_t __ret; \
  9712. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
  9713. __ret; \
  9714. })
  9715. #else
  9716. #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  9717. uint32x2x3_t __s1 = __p1; \
  9718. uint32x2x3_t __rev1; \
  9719. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  9720. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  9721. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  9722. uint32x2x3_t __ret; \
  9723. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
  9724. \
  9725. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9726. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9727. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9728. __ret; \
  9729. })
  9730. #endif
  9731. #ifdef __LITTLE_ENDIAN__
  9732. #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  9733. uint16x4x3_t __s1 = __p1; \
  9734. uint16x4x3_t __ret; \
  9735. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
  9736. __ret; \
  9737. })
  9738. #else
  9739. #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  9740. uint16x4x3_t __s1 = __p1; \
  9741. uint16x4x3_t __rev1; \
  9742. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  9743. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  9744. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  9745. uint16x4x3_t __ret; \
  9746. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
  9747. \
  9748. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9749. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9750. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9751. __ret; \
  9752. })
  9753. #endif
  9754. #ifdef __LITTLE_ENDIAN__
  9755. #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  9756. int8x8x3_t __s1 = __p1; \
  9757. int8x8x3_t __ret; \
  9758. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
  9759. __ret; \
  9760. })
  9761. #else
  9762. #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  9763. int8x8x3_t __s1 = __p1; \
  9764. int8x8x3_t __rev1; \
  9765. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9766. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9767. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9768. int8x8x3_t __ret; \
  9769. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
  9770. \
  9771. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9772. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9773. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9774. __ret; \
  9775. })
  9776. #endif
  9777. #ifdef __LITTLE_ENDIAN__
  9778. #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  9779. float32x2x3_t __s1 = __p1; \
  9780. float32x2x3_t __ret; \
  9781. __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
  9782. __ret; \
  9783. })
  9784. #else
  9785. #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  9786. float32x2x3_t __s1 = __p1; \
  9787. float32x2x3_t __rev1; \
  9788. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  9789. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  9790. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  9791. float32x2x3_t __ret; \
  9792. __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
  9793. \
  9794. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9795. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9796. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9797. __ret; \
  9798. })
  9799. #endif
  9800. #ifdef __LITTLE_ENDIAN__
  9801. #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  9802. float16x4x3_t __s1 = __p1; \
  9803. float16x4x3_t __ret; \
  9804. __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
  9805. __ret; \
  9806. })
  9807. #else
  9808. #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  9809. float16x4x3_t __s1 = __p1; \
  9810. float16x4x3_t __rev1; \
  9811. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  9812. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  9813. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  9814. float16x4x3_t __ret; \
  9815. __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
  9816. \
  9817. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9818. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9819. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9820. __ret; \
  9821. })
  9822. #endif
  9823. #ifdef __LITTLE_ENDIAN__
  9824. #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  9825. int32x2x3_t __s1 = __p1; \
  9826. int32x2x3_t __ret; \
  9827. __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
  9828. __ret; \
  9829. })
  9830. #else
  9831. #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  9832. int32x2x3_t __s1 = __p1; \
  9833. int32x2x3_t __rev1; \
  9834. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  9835. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  9836. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  9837. int32x2x3_t __ret; \
  9838. __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
  9839. \
  9840. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  9841. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  9842. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  9843. __ret; \
  9844. })
  9845. #endif
  9846. #ifdef __LITTLE_ENDIAN__
  9847. #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  9848. int16x4x3_t __s1 = __p1; \
  9849. int16x4x3_t __ret; \
  9850. __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
  9851. __ret; \
  9852. })
  9853. #else
  9854. #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  9855. int16x4x3_t __s1 = __p1; \
  9856. int16x4x3_t __rev1; \
  9857. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  9858. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  9859. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  9860. int16x4x3_t __ret; \
  9861. __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
  9862. \
  9863. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9864. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9865. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9866. __ret; \
  9867. })
  9868. #endif
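/* vld4_* / vld4q_*: load four interleaved vectors from memory (VLD4),
   de-interleaving structure-of-four data into the four val[] members of the
   returned *x4_t aggregate.  On big-endian targets only the results need
   their lanes reversed, since these forms take no vector inputs.

   Illustrative use only (not part of this header; the function and buffer
   names below are hypothetical):

       void split_rgba(const uint8_t *pixels,
                       uint8_t *r, uint8_t *g, uint8_t *b, uint8_t *a) {
           uint8x8x4_t px = vld4_u8(pixels);   // 8 pixels: val[0]=R .. val[3]=A
           vst1_u8(r, px.val[0]);
           vst1_u8(g, px.val[1]);
           vst1_u8(b, px.val[2]);
           vst1_u8(a, px.val[3]);
       }
*/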
  9869. #ifdef __LITTLE_ENDIAN__
  9870. #define vld4_p8(__p0) __extension__ ({ \
  9871. poly8x8x4_t __ret; \
  9872. __builtin_neon_vld4_v(&__ret, __p0, 4); \
  9873. __ret; \
  9874. })
  9875. #else
  9876. #define vld4_p8(__p0) __extension__ ({ \
  9877. poly8x8x4_t __ret; \
  9878. __builtin_neon_vld4_v(&__ret, __p0, 4); \
  9879. \
  9880. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9881. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9882. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9883. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  9884. __ret; \
  9885. })
  9886. #endif
  9887. #ifdef __LITTLE_ENDIAN__
  9888. #define vld4_p16(__p0) __extension__ ({ \
  9889. poly16x4x4_t __ret; \
  9890. __builtin_neon_vld4_v(&__ret, __p0, 5); \
  9891. __ret; \
  9892. })
  9893. #else
  9894. #define vld4_p16(__p0) __extension__ ({ \
  9895. poly16x4x4_t __ret; \
  9896. __builtin_neon_vld4_v(&__ret, __p0, 5); \
  9897. \
  9898. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9899. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9900. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9901. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  9902. __ret; \
  9903. })
  9904. #endif
  9905. #ifdef __LITTLE_ENDIAN__
  9906. #define vld4q_p8(__p0) __extension__ ({ \
  9907. poly8x16x4_t __ret; \
  9908. __builtin_neon_vld4q_v(&__ret, __p0, 36); \
  9909. __ret; \
  9910. })
  9911. #else
  9912. #define vld4q_p8(__p0) __extension__ ({ \
  9913. poly8x16x4_t __ret; \
  9914. __builtin_neon_vld4q_v(&__ret, __p0, 36); \
  9915. \
  9916. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9917. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9918. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9919. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9920. __ret; \
  9921. })
  9922. #endif
  9923. #ifdef __LITTLE_ENDIAN__
  9924. #define vld4q_p16(__p0) __extension__ ({ \
  9925. poly16x8x4_t __ret; \
  9926. __builtin_neon_vld4q_v(&__ret, __p0, 37); \
  9927. __ret; \
  9928. })
  9929. #else
  9930. #define vld4q_p16(__p0) __extension__ ({ \
  9931. poly16x8x4_t __ret; \
  9932. __builtin_neon_vld4q_v(&__ret, __p0, 37); \
  9933. \
  9934. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9935. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9936. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9937. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  9938. __ret; \
  9939. })
  9940. #endif
  9941. #ifdef __LITTLE_ENDIAN__
  9942. #define vld4q_u8(__p0) __extension__ ({ \
  9943. uint8x16x4_t __ret; \
  9944. __builtin_neon_vld4q_v(&__ret, __p0, 48); \
  9945. __ret; \
  9946. })
  9947. #else
  9948. #define vld4q_u8(__p0) __extension__ ({ \
  9949. uint8x16x4_t __ret; \
  9950. __builtin_neon_vld4q_v(&__ret, __p0, 48); \
  9951. \
  9952. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9953. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9954. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9955. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  9956. __ret; \
  9957. })
  9958. #endif
  9959. #ifdef __LITTLE_ENDIAN__
  9960. #define vld4q_u32(__p0) __extension__ ({ \
  9961. uint32x4x4_t __ret; \
  9962. __builtin_neon_vld4q_v(&__ret, __p0, 50); \
  9963. __ret; \
  9964. })
  9965. #else
  9966. #define vld4q_u32(__p0) __extension__ ({ \
  9967. uint32x4x4_t __ret; \
  9968. __builtin_neon_vld4q_v(&__ret, __p0, 50); \
  9969. \
  9970. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  9971. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  9972. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  9973. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  9974. __ret; \
  9975. })
  9976. #endif
  9977. #ifdef __LITTLE_ENDIAN__
  9978. #define vld4q_u16(__p0) __extension__ ({ \
  9979. uint16x8x4_t __ret; \
  9980. __builtin_neon_vld4q_v(&__ret, __p0, 49); \
  9981. __ret; \
  9982. })
  9983. #else
  9984. #define vld4q_u16(__p0) __extension__ ({ \
  9985. uint16x8x4_t __ret; \
  9986. __builtin_neon_vld4q_v(&__ret, __p0, 49); \
  9987. \
  9988. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  9989. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  9990. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  9991. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  9992. __ret; \
  9993. })
  9994. #endif
  9995. #ifdef __LITTLE_ENDIAN__
  9996. #define vld4q_s8(__p0) __extension__ ({ \
  9997. int8x16x4_t __ret; \
  9998. __builtin_neon_vld4q_v(&__ret, __p0, 32); \
  9999. __ret; \
  10000. })
  10001. #else
  10002. #define vld4q_s8(__p0) __extension__ ({ \
  10003. int8x16x4_t __ret; \
  10004. __builtin_neon_vld4q_v(&__ret, __p0, 32); \
  10005. \
  10006. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  10007. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  10008. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  10009. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  10010. __ret; \
  10011. })
  10012. #endif
  10013. #ifdef __LITTLE_ENDIAN__
  10014. #define vld4q_f32(__p0) __extension__ ({ \
  10015. float32x4x4_t __ret; \
  10016. __builtin_neon_vld4q_v(&__ret, __p0, 41); \
  10017. __ret; \
  10018. })
  10019. #else
  10020. #define vld4q_f32(__p0) __extension__ ({ \
  10021. float32x4x4_t __ret; \
  10022. __builtin_neon_vld4q_v(&__ret, __p0, 41); \
  10023. \
  10024. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10025. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10026. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10027. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10028. __ret; \
  10029. })
  10030. #endif
  10031. #ifdef __LITTLE_ENDIAN__
  10032. #define vld4q_f16(__p0) __extension__ ({ \
  10033. float16x8x4_t __ret; \
  10034. __builtin_neon_vld4q_v(&__ret, __p0, 40); \
  10035. __ret; \
  10036. })
  10037. #else
  10038. #define vld4q_f16(__p0) __extension__ ({ \
  10039. float16x8x4_t __ret; \
  10040. __builtin_neon_vld4q_v(&__ret, __p0, 40); \
  10041. \
  10042. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10043. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10044. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10045. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10046. __ret; \
  10047. })
  10048. #endif
  10049. #ifdef __LITTLE_ENDIAN__
  10050. #define vld4q_s32(__p0) __extension__ ({ \
  10051. int32x4x4_t __ret; \
  10052. __builtin_neon_vld4q_v(&__ret, __p0, 34); \
  10053. __ret; \
  10054. })
  10055. #else
  10056. #define vld4q_s32(__p0) __extension__ ({ \
  10057. int32x4x4_t __ret; \
  10058. __builtin_neon_vld4q_v(&__ret, __p0, 34); \
  10059. \
  10060. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10061. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10062. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10063. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10064. __ret; \
  10065. })
  10066. #endif
  10067. #ifdef __LITTLE_ENDIAN__
  10068. #define vld4q_s16(__p0) __extension__ ({ \
  10069. int16x8x4_t __ret; \
  10070. __builtin_neon_vld4q_v(&__ret, __p0, 33); \
  10071. __ret; \
  10072. })
  10073. #else
  10074. #define vld4q_s16(__p0) __extension__ ({ \
  10075. int16x8x4_t __ret; \
  10076. __builtin_neon_vld4q_v(&__ret, __p0, 33); \
  10077. \
  10078. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10079. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10080. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10081. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10082. __ret; \
  10083. })
  10084. #endif
  10085. #ifdef __LITTLE_ENDIAN__
  10086. #define vld4_u8(__p0) __extension__ ({ \
  10087. uint8x8x4_t __ret; \
  10088. __builtin_neon_vld4_v(&__ret, __p0, 16); \
  10089. __ret; \
  10090. })
  10091. #else
  10092. #define vld4_u8(__p0) __extension__ ({ \
  10093. uint8x8x4_t __ret; \
  10094. __builtin_neon_vld4_v(&__ret, __p0, 16); \
  10095. \
  10096. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10097. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10098. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10099. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10100. __ret; \
  10101. })
  10102. #endif
  10103. #ifdef __LITTLE_ENDIAN__
  10104. #define vld4_u32(__p0) __extension__ ({ \
  10105. uint32x2x4_t __ret; \
  10106. __builtin_neon_vld4_v(&__ret, __p0, 18); \
  10107. __ret; \
  10108. })
  10109. #else
  10110. #define vld4_u32(__p0) __extension__ ({ \
  10111. uint32x2x4_t __ret; \
  10112. __builtin_neon_vld4_v(&__ret, __p0, 18); \
  10113. \
  10114. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10115. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10116. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10117. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10118. __ret; \
  10119. })
  10120. #endif
  10121. #ifdef __LITTLE_ENDIAN__
  10122. #define vld4_u64(__p0) __extension__ ({ \
  10123. uint64x1x4_t __ret; \
  10124. __builtin_neon_vld4_v(&__ret, __p0, 19); \
  10125. __ret; \
  10126. })
  10127. #else
  10128. #define vld4_u64(__p0) __extension__ ({ \
  10129. uint64x1x4_t __ret; \
  10130. __builtin_neon_vld4_v(&__ret, __p0, 19); \
  10131. __ret; \
  10132. })
  10133. #endif
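/* For the 64-bit-element variants (vld4_u64 above, vld4_s64 below) the
   big-endian definition is identical to the little-endian one: a one-element
   vector has only a single lane, so no reversal is needed. */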
  10134. #ifdef __LITTLE_ENDIAN__
  10135. #define vld4_u16(__p0) __extension__ ({ \
  10136. uint16x4x4_t __ret; \
  10137. __builtin_neon_vld4_v(&__ret, __p0, 17); \
  10138. __ret; \
  10139. })
  10140. #else
  10141. #define vld4_u16(__p0) __extension__ ({ \
  10142. uint16x4x4_t __ret; \
  10143. __builtin_neon_vld4_v(&__ret, __p0, 17); \
  10144. \
  10145. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10146. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10147. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10148. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10149. __ret; \
  10150. })
  10151. #endif
  10152. #ifdef __LITTLE_ENDIAN__
  10153. #define vld4_s8(__p0) __extension__ ({ \
  10154. int8x8x4_t __ret; \
  10155. __builtin_neon_vld4_v(&__ret, __p0, 0); \
  10156. __ret; \
  10157. })
  10158. #else
  10159. #define vld4_s8(__p0) __extension__ ({ \
  10160. int8x8x4_t __ret; \
  10161. __builtin_neon_vld4_v(&__ret, __p0, 0); \
  10162. \
  10163. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10164. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10165. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10166. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10167. __ret; \
  10168. })
  10169. #endif
  10170. #ifdef __LITTLE_ENDIAN__
  10171. #define vld4_f32(__p0) __extension__ ({ \
  10172. float32x2x4_t __ret; \
  10173. __builtin_neon_vld4_v(&__ret, __p0, 9); \
  10174. __ret; \
  10175. })
  10176. #else
  10177. #define vld4_f32(__p0) __extension__ ({ \
  10178. float32x2x4_t __ret; \
  10179. __builtin_neon_vld4_v(&__ret, __p0, 9); \
  10180. \
  10181. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10182. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10183. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10184. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10185. __ret; \
  10186. })
  10187. #endif
  10188. #ifdef __LITTLE_ENDIAN__
  10189. #define vld4_f16(__p0) __extension__ ({ \
  10190. float16x4x4_t __ret; \
  10191. __builtin_neon_vld4_v(&__ret, __p0, 8); \
  10192. __ret; \
  10193. })
  10194. #else
  10195. #define vld4_f16(__p0) __extension__ ({ \
  10196. float16x4x4_t __ret; \
  10197. __builtin_neon_vld4_v(&__ret, __p0, 8); \
  10198. \
  10199. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10200. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10201. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10202. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10203. __ret; \
  10204. })
  10205. #endif
  10206. #ifdef __LITTLE_ENDIAN__
  10207. #define vld4_s32(__p0) __extension__ ({ \
  10208. int32x2x4_t __ret; \
  10209. __builtin_neon_vld4_v(&__ret, __p0, 2); \
  10210. __ret; \
  10211. })
  10212. #else
  10213. #define vld4_s32(__p0) __extension__ ({ \
  10214. int32x2x4_t __ret; \
  10215. __builtin_neon_vld4_v(&__ret, __p0, 2); \
  10216. \
  10217. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10218. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10219. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10220. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10221. __ret; \
  10222. })
  10223. #endif
  10224. #ifdef __LITTLE_ENDIAN__
  10225. #define vld4_s64(__p0) __extension__ ({ \
  10226. int64x1x4_t __ret; \
  10227. __builtin_neon_vld4_v(&__ret, __p0, 3); \
  10228. __ret; \
  10229. })
  10230. #else
  10231. #define vld4_s64(__p0) __extension__ ({ \
  10232. int64x1x4_t __ret; \
  10233. __builtin_neon_vld4_v(&__ret, __p0, 3); \
  10234. __ret; \
  10235. })
  10236. #endif
  10237. #ifdef __LITTLE_ENDIAN__
  10238. #define vld4_s16(__p0) __extension__ ({ \
  10239. int16x4x4_t __ret; \
  10240. __builtin_neon_vld4_v(&__ret, __p0, 1); \
  10241. __ret; \
  10242. })
  10243. #else
  10244. #define vld4_s16(__p0) __extension__ ({ \
  10245. int16x4x4_t __ret; \
  10246. __builtin_neon_vld4_v(&__ret, __p0, 1); \
  10247. \
  10248. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10249. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10250. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10251. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10252. __ret; \
  10253. })
  10254. #endif
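/* vld4_dup_*: load a single four-element structure and replicate each of its
   elements across every lane of the corresponding result vector (the
   "all lanes" form of VLD4).  Endian handling mirrors vld4_*: only the
   results are lane-reversed on big-endian targets. */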
  10255. #ifdef __LITTLE_ENDIAN__
  10256. #define vld4_dup_p8(__p0) __extension__ ({ \
  10257. poly8x8x4_t __ret; \
  10258. __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
  10259. __ret; \
  10260. })
  10261. #else
  10262. #define vld4_dup_p8(__p0) __extension__ ({ \
  10263. poly8x8x4_t __ret; \
  10264. __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
  10265. \
  10266. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10267. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10268. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10269. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10270. __ret; \
  10271. })
  10272. #endif
  10273. #ifdef __LITTLE_ENDIAN__
  10274. #define vld4_dup_p16(__p0) __extension__ ({ \
  10275. poly16x4x4_t __ret; \
  10276. __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
  10277. __ret; \
  10278. })
  10279. #else
  10280. #define vld4_dup_p16(__p0) __extension__ ({ \
  10281. poly16x4x4_t __ret; \
  10282. __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
  10283. \
  10284. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10285. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10286. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10287. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10288. __ret; \
  10289. })
  10290. #endif
  10291. #ifdef __LITTLE_ENDIAN__
  10292. #define vld4_dup_u8(__p0) __extension__ ({ \
  10293. uint8x8x4_t __ret; \
  10294. __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
  10295. __ret; \
  10296. })
  10297. #else
  10298. #define vld4_dup_u8(__p0) __extension__ ({ \
  10299. uint8x8x4_t __ret; \
  10300. __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
  10301. \
  10302. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10303. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10304. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10305. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10306. __ret; \
  10307. })
  10308. #endif
  10309. #ifdef __LITTLE_ENDIAN__
  10310. #define vld4_dup_u32(__p0) __extension__ ({ \
  10311. uint32x2x4_t __ret; \
  10312. __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
  10313. __ret; \
  10314. })
  10315. #else
  10316. #define vld4_dup_u32(__p0) __extension__ ({ \
  10317. uint32x2x4_t __ret; \
  10318. __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
  10319. \
  10320. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10321. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10322. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10323. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10324. __ret; \
  10325. })
  10326. #endif
  10327. #ifdef __LITTLE_ENDIAN__
  10328. #define vld4_dup_u64(__p0) __extension__ ({ \
  10329. uint64x1x4_t __ret; \
  10330. __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
  10331. __ret; \
  10332. })
  10333. #else
  10334. #define vld4_dup_u64(__p0) __extension__ ({ \
  10335. uint64x1x4_t __ret; \
  10336. __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
  10337. __ret; \
  10338. })
  10339. #endif
  10340. #ifdef __LITTLE_ENDIAN__
  10341. #define vld4_dup_u16(__p0) __extension__ ({ \
  10342. uint16x4x4_t __ret; \
  10343. __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
  10344. __ret; \
  10345. })
  10346. #else
  10347. #define vld4_dup_u16(__p0) __extension__ ({ \
  10348. uint16x4x4_t __ret; \
  10349. __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
  10350. \
  10351. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10352. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10353. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10354. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10355. __ret; \
  10356. })
  10357. #endif
  10358. #ifdef __LITTLE_ENDIAN__
  10359. #define vld4_dup_s8(__p0) __extension__ ({ \
  10360. int8x8x4_t __ret; \
  10361. __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
  10362. __ret; \
  10363. })
  10364. #else
  10365. #define vld4_dup_s8(__p0) __extension__ ({ \
  10366. int8x8x4_t __ret; \
  10367. __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
  10368. \
  10369. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10370. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10371. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10372. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10373. __ret; \
  10374. })
  10375. #endif
  10376. #ifdef __LITTLE_ENDIAN__
  10377. #define vld4_dup_f32(__p0) __extension__ ({ \
  10378. float32x2x4_t __ret; \
  10379. __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
  10380. __ret; \
  10381. })
  10382. #else
  10383. #define vld4_dup_f32(__p0) __extension__ ({ \
  10384. float32x2x4_t __ret; \
  10385. __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
  10386. \
  10387. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10388. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10389. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10390. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10391. __ret; \
  10392. })
  10393. #endif
  10394. #ifdef __LITTLE_ENDIAN__
  10395. #define vld4_dup_f16(__p0) __extension__ ({ \
  10396. float16x4x4_t __ret; \
  10397. __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
  10398. __ret; \
  10399. })
  10400. #else
  10401. #define vld4_dup_f16(__p0) __extension__ ({ \
  10402. float16x4x4_t __ret; \
  10403. __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
  10404. \
  10405. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10406. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10407. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10408. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10409. __ret; \
  10410. })
  10411. #endif
  10412. #ifdef __LITTLE_ENDIAN__
  10413. #define vld4_dup_s32(__p0) __extension__ ({ \
  10414. int32x2x4_t __ret; \
  10415. __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
  10416. __ret; \
  10417. })
  10418. #else
  10419. #define vld4_dup_s32(__p0) __extension__ ({ \
  10420. int32x2x4_t __ret; \
  10421. __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
  10422. \
  10423. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10424. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10425. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10426. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10427. __ret; \
  10428. })
  10429. #endif
  10430. #ifdef __LITTLE_ENDIAN__
  10431. #define vld4_dup_s64(__p0) __extension__ ({ \
  10432. int64x1x4_t __ret; \
  10433. __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
  10434. __ret; \
  10435. })
  10436. #else
  10437. #define vld4_dup_s64(__p0) __extension__ ({ \
  10438. int64x1x4_t __ret; \
  10439. __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
  10440. __ret; \
  10441. })
  10442. #endif
  10443. #ifdef __LITTLE_ENDIAN__
  10444. #define vld4_dup_s16(__p0) __extension__ ({ \
  10445. int16x4x4_t __ret; \
  10446. __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
  10447. __ret; \
  10448. })
  10449. #else
  10450. #define vld4_dup_s16(__p0) __extension__ ({ \
  10451. int16x4x4_t __ret; \
  10452. __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
  10453. \
  10454. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10455. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10456. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10457. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10458. __ret; \
  10459. })
  10460. #endif
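/* vld4_lane_* / vld4q_lane_*: load one four-element structure from memory
   into lane __p2 of the four vectors passed in __p1, leave the other lanes
   unchanged, and return the updated aggregate.  Because the existing vectors
   are inputs as well as outputs, the big-endian versions reverse the lanes of
   each input before the builtin call and reverse each result afterwards.

   Illustrative use only (not part of this header; `base` and `acc` are
   hypothetical):

       uint16x4x4_t acc = vld4_u16(base);        // four de-interleaved vectors
       acc = vld4_lane_u16(base + 16, acc, 2);   // refill lane 2 from memory
*/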
  10461. #ifdef __LITTLE_ENDIAN__
  10462. #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  10463. poly8x8x4_t __s1 = __p1; \
  10464. poly8x8x4_t __ret; \
  10465. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
  10466. __ret; \
  10467. })
  10468. #else
  10469. #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  10470. poly8x8x4_t __s1 = __p1; \
  10471. poly8x8x4_t __rev1; \
  10472. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10473. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10474. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10475. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10476. poly8x8x4_t __ret; \
  10477. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
  10478. \
  10479. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10480. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10481. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10482. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10483. __ret; \
  10484. })
  10485. #endif
  10486. #ifdef __LITTLE_ENDIAN__
  10487. #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  10488. poly16x4x4_t __s1 = __p1; \
  10489. poly16x4x4_t __ret; \
  10490. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
  10491. __ret; \
  10492. })
  10493. #else
  10494. #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  10495. poly16x4x4_t __s1 = __p1; \
  10496. poly16x4x4_t __rev1; \
  10497. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  10498. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  10499. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  10500. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  10501. poly16x4x4_t __ret; \
  10502. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
  10503. \
  10504. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10505. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10506. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10507. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10508. __ret; \
  10509. })
  10510. #endif
  10511. #ifdef __LITTLE_ENDIAN__
  10512. #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  10513. poly16x8x4_t __s1 = __p1; \
  10514. poly16x8x4_t __ret; \
  10515. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
  10516. __ret; \
  10517. })
  10518. #else
  10519. #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  10520. poly16x8x4_t __s1 = __p1; \
  10521. poly16x8x4_t __rev1; \
  10522. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10523. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10524. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10525. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10526. poly16x8x4_t __ret; \
  10527. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
  10528. \
  10529. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10530. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10531. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10532. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10533. __ret; \
  10534. })
  10535. #endif
  10536. #ifdef __LITTLE_ENDIAN__
  10537. #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  10538. uint32x4x4_t __s1 = __p1; \
  10539. uint32x4x4_t __ret; \
  10540. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
  10541. __ret; \
  10542. })
  10543. #else
  10544. #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  10545. uint32x4x4_t __s1 = __p1; \
  10546. uint32x4x4_t __rev1; \
  10547. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  10548. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  10549. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  10550. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  10551. uint32x4x4_t __ret; \
  10552. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
  10553. \
  10554. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10555. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10556. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10557. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10558. __ret; \
  10559. })
  10560. #endif
  10561. #ifdef __LITTLE_ENDIAN__
  10562. #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  10563. uint16x8x4_t __s1 = __p1; \
  10564. uint16x8x4_t __ret; \
  10565. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
  10566. __ret; \
  10567. })
  10568. #else
  10569. #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  10570. uint16x8x4_t __s1 = __p1; \
  10571. uint16x8x4_t __rev1; \
  10572. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10573. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10574. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10575. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10576. uint16x8x4_t __ret; \
  10577. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
  10578. \
  10579. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10580. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10581. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10582. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10583. __ret; \
  10584. })
  10585. #endif
  10586. #ifdef __LITTLE_ENDIAN__
  10587. #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  10588. float32x4x4_t __s1 = __p1; \
  10589. float32x4x4_t __ret; \
  10590. __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
  10591. __ret; \
  10592. })
  10593. #else
  10594. #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  10595. float32x4x4_t __s1 = __p1; \
  10596. float32x4x4_t __rev1; \
  10597. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  10598. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  10599. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  10600. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  10601. float32x4x4_t __ret; \
  10602. __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
  10603. \
  10604. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10605. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10606. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10607. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10608. __ret; \
  10609. })
  10610. #endif
  10611. #ifdef __LITTLE_ENDIAN__
  10612. #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  10613. float16x8x4_t __s1 = __p1; \
  10614. float16x8x4_t __ret; \
  10615. __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
  10616. __ret; \
  10617. })
  10618. #else
  10619. #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  10620. float16x8x4_t __s1 = __p1; \
  10621. float16x8x4_t __rev1; \
  10622. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10623. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10624. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10625. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10626. float16x8x4_t __ret; \
  10627. __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
  10628. \
  10629. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10630. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10631. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10632. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10633. __ret; \
  10634. })
  10635. #endif
  10636. #ifdef __LITTLE_ENDIAN__
  10637. #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  10638. int32x4x4_t __s1 = __p1; \
  10639. int32x4x4_t __ret; \
  10640. __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
  10641. __ret; \
  10642. })
  10643. #else
  10644. #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  10645. int32x4x4_t __s1 = __p1; \
  10646. int32x4x4_t __rev1; \
  10647. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  10648. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  10649. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  10650. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  10651. int32x4x4_t __ret; \
  10652. __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
  10653. \
  10654. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10655. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10656. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10657. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10658. __ret; \
  10659. })
  10660. #endif
  10661. #ifdef __LITTLE_ENDIAN__
  10662. #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  10663. int16x8x4_t __s1 = __p1; \
  10664. int16x8x4_t __ret; \
  10665. __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
  10666. __ret; \
  10667. })
  10668. #else
  10669. #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  10670. int16x8x4_t __s1 = __p1; \
  10671. int16x8x4_t __rev1; \
  10672. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10673. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10674. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10675. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10676. int16x8x4_t __ret; \
  10677. __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
  10678. \
  10679. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10680. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10681. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10682. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10683. __ret; \
  10684. })
  10685. #endif
  10686. #ifdef __LITTLE_ENDIAN__
  10687. #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  10688. uint8x8x4_t __s1 = __p1; \
  10689. uint8x8x4_t __ret; \
  10690. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
  10691. __ret; \
  10692. })
  10693. #else
  10694. #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  10695. uint8x8x4_t __s1 = __p1; \
  10696. uint8x8x4_t __rev1; \
  10697. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10698. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10699. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10700. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10701. uint8x8x4_t __ret; \
  10702. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
  10704. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10705. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10706. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10707. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10708. __ret; \
  10709. })
  10710. #endif
  10711. #ifdef __LITTLE_ENDIAN__
  10712. #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  10713. uint32x2x4_t __s1 = __p1; \
  10714. uint32x2x4_t __ret; \
  10715. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
  10716. __ret; \
  10717. })
  10718. #else
  10719. #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  10720. uint32x2x4_t __s1 = __p1; \
  10721. uint32x2x4_t __rev1; \
  10722. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  10723. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  10724. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  10725. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  10726. uint32x2x4_t __ret; \
  10727. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
  10729. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10730. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10731. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10732. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10733. __ret; \
  10734. })
  10735. #endif
  10736. #ifdef __LITTLE_ENDIAN__
  10737. #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  10738. uint16x4x4_t __s1 = __p1; \
  10739. uint16x4x4_t __ret; \
  10740. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
  10741. __ret; \
  10742. })
  10743. #else
  10744. #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  10745. uint16x4x4_t __s1 = __p1; \
  10746. uint16x4x4_t __rev1; \
  10747. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  10748. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  10749. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  10750. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  10751. uint16x4x4_t __ret; \
  10752. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
  10754. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10755. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10756. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10757. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10758. __ret; \
  10759. })
  10760. #endif
  10761. #ifdef __LITTLE_ENDIAN__
  10762. #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  10763. int8x8x4_t __s1 = __p1; \
  10764. int8x8x4_t __ret; \
  10765. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
  10766. __ret; \
  10767. })
  10768. #else
  10769. #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  10770. int8x8x4_t __s1 = __p1; \
  10771. int8x8x4_t __rev1; \
  10772. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10773. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10774. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10775. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10776. int8x8x4_t __ret; \
  10777. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
  10779. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  10780. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  10781. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  10782. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  10783. __ret; \
  10784. })
  10785. #endif
  10786. #ifdef __LITTLE_ENDIAN__
  10787. #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  10788. float32x2x4_t __s1 = __p1; \
  10789. float32x2x4_t __ret; \
  10790. __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
  10791. __ret; \
  10792. })
  10793. #else
  10794. #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  10795. float32x2x4_t __s1 = __p1; \
  10796. float32x2x4_t __rev1; \
  10797. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  10798. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  10799. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  10800. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  10801. float32x2x4_t __ret; \
  10802. __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
  10804. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10805. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10806. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10807. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10808. __ret; \
  10809. })
  10810. #endif
  10811. #ifdef __LITTLE_ENDIAN__
  10812. #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  10813. float16x4x4_t __s1 = __p1; \
  10814. float16x4x4_t __ret; \
  10815. __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
  10816. __ret; \
  10817. })
  10818. #else
  10819. #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  10820. float16x4x4_t __s1 = __p1; \
  10821. float16x4x4_t __rev1; \
  10822. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  10823. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  10824. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  10825. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  10826. float16x4x4_t __ret; \
  10827. __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
  10829. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10830. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10831. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10832. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10833. __ret; \
  10834. })
  10835. #endif
  10836. #ifdef __LITTLE_ENDIAN__
  10837. #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  10838. int32x2x4_t __s1 = __p1; \
  10839. int32x2x4_t __ret; \
  10840. __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
  10841. __ret; \
  10842. })
  10843. #else
  10844. #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  10845. int32x2x4_t __s1 = __p1; \
  10846. int32x2x4_t __rev1; \
  10847. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  10848. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  10849. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  10850. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  10851. int32x2x4_t __ret; \
  10852. __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
  10854. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  10855. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  10856. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  10857. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  10858. __ret; \
  10859. })
  10860. #endif
  10861. #ifdef __LITTLE_ENDIAN__
  10862. #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  10863. int16x4x4_t __s1 = __p1; \
  10864. int16x4x4_t __ret; \
  10865. __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
  10866. __ret; \
  10867. })
  10868. #else
  10869. #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  10870. int16x4x4_t __s1 = __p1; \
  10871. int16x4x4_t __rev1; \
  10872. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  10873. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  10874. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  10875. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  10876. int16x4x4_t __ret; \
  10877. __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
  10879. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  10880. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  10881. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  10882. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  10883. __ret; \
  10884. })
  10885. #endif
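/* Sketch of the 64-bit (d-register) counterpart -- hypothetical helper, not an
 * ACLE intrinsic.  The vld4_lane_* forms behave like vld4q_lane_* but operate
 * on x4 tuples of 64-bit vectors.  Here lane 0 of a uint16x4x4_t is refreshed
 * from four consecutive uint16_t values at __buf. */
__ai uint16x4x4_t __example_vld4_reload_lane0_u16(uint16_t const * __buf, uint16x4x4_t __src) {
  return vld4_lane_u16(__buf, __src, 0);
}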
  10886. #ifdef __LITTLE_ENDIAN__
  10887. __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  10888. uint8x16_t __ret;
  10889. __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  10890. return __ret;
  10891. }
  10892. #else
  10893. __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  10894. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  10895. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  10896. uint8x16_t __ret;
  10897. __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  10898. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  10899. return __ret;
  10900. }
  10901. #endif
  10902. #ifdef __LITTLE_ENDIAN__
  10903. __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  10904. uint32x4_t __ret;
  10905. __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  10906. return __ret;
  10907. }
  10908. #else
  10909. __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  10910. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  10911. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  10912. uint32x4_t __ret;
  10913. __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  10914. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  10915. return __ret;
  10916. }
  10917. #endif
  10918. #ifdef __LITTLE_ENDIAN__
  10919. __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  10920. uint16x8_t __ret;
  10921. __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  10922. return __ret;
  10923. }
  10924. #else
  10925. __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  10926. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  10927. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  10928. uint16x8_t __ret;
  10929. __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  10930. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  10931. return __ret;
  10932. }
  10933. #endif
  10934. #ifdef __LITTLE_ENDIAN__
  10935. __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
  10936. int8x16_t __ret;
  10937. __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  10938. return __ret;
  10939. }
  10940. #else
  10941. __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
  10942. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  10943. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  10944. int8x16_t __ret;
  10945. __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  10946. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  10947. return __ret;
  10948. }
  10949. #endif
  10950. #ifdef __LITTLE_ENDIAN__
  10951. __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
  10952. float32x4_t __ret;
  10953. __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  10954. return __ret;
  10955. }
  10956. #else
  10957. __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
  10958. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  10959. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  10960. float32x4_t __ret;
  10961. __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  10962. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  10963. return __ret;
  10964. }
  10965. #endif
  10966. #ifdef __LITTLE_ENDIAN__
  10967. __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
  10968. int32x4_t __ret;
  10969. __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  10970. return __ret;
  10971. }
  10972. #else
  10973. __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
  10974. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  10975. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  10976. int32x4_t __ret;
  10977. __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  10978. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  10979. return __ret;
  10980. }
  10981. #endif
  10982. #ifdef __LITTLE_ENDIAN__
  10983. __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
  10984. int16x8_t __ret;
  10985. __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  10986. return __ret;
  10987. }
  10988. #else
  10989. __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
  10990. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  10991. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  10992. int16x8_t __ret;
  10993. __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  10994. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  10995. return __ret;
  10996. }
  10997. #endif
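/* The pattern used throughout this header, shown on vmaxq_*: every intrinsic
 * has a little-endian body that calls the builtin directly, and a big-endian
 * body that first reverses each operand with __builtin_shufflevector, calls
 * the same builtin, and reverses the result back, so callers see identical
 * lane numbering on both endiannesses.  Minimal usage sketch (hypothetical
 * helper name, not part of the API): per-lane maximum of two float32x4_t. */
__ai float32x4_t __example_vmaxq_f32(float32x4_t __a, float32x4_t __b) {
  return vmaxq_f32(__a, __b);  /* each result lane holds the larger of the two inputs' lanes */
}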
  10998. #ifdef __LITTLE_ENDIAN__
  10999. __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
  11000. uint8x8_t __ret;
  11001. __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  11002. return __ret;
  11003. }
  11004. #else
  11005. __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
  11006. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11007. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11008. uint8x8_t __ret;
  11009. __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  11010. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11011. return __ret;
  11012. }
  11013. #endif
  11014. #ifdef __LITTLE_ENDIAN__
  11015. __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
  11016. uint32x2_t __ret;
  11017. __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  11018. return __ret;
  11019. }
  11020. #else
  11021. __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
  11022. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11023. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11024. uint32x2_t __ret;
  11025. __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  11026. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11027. return __ret;
  11028. }
  11029. #endif
  11030. #ifdef __LITTLE_ENDIAN__
  11031. __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
  11032. uint16x4_t __ret;
  11033. __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  11034. return __ret;
  11035. }
  11036. #else
  11037. __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
  11038. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11039. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11040. uint16x4_t __ret;
  11041. __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  11042. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11043. return __ret;
  11044. }
  11045. #endif
  11046. #ifdef __LITTLE_ENDIAN__
  11047. __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
  11048. int8x8_t __ret;
  11049. __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  11050. return __ret;
  11051. }
  11052. #else
  11053. __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
  11054. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11055. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11056. int8x8_t __ret;
  11057. __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  11058. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11059. return __ret;
  11060. }
  11061. #endif
  11062. #ifdef __LITTLE_ENDIAN__
  11063. __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
  11064. float32x2_t __ret;
  11065. __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  11066. return __ret;
  11067. }
  11068. #else
  11069. __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
  11070. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11071. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11072. float32x2_t __ret;
  11073. __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  11074. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11075. return __ret;
  11076. }
  11077. #endif
  11078. #ifdef __LITTLE_ENDIAN__
  11079. __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
  11080. int32x2_t __ret;
  11081. __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  11082. return __ret;
  11083. }
  11084. #else
  11085. __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
  11086. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11087. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11088. int32x2_t __ret;
  11089. __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  11090. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11091. return __ret;
  11092. }
  11093. #endif
  11094. #ifdef __LITTLE_ENDIAN__
  11095. __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
  11096. int16x4_t __ret;
  11097. __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  11098. return __ret;
  11099. }
  11100. #else
  11101. __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
  11102. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11103. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11104. int16x4_t __ret;
  11105. __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  11106. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11107. return __ret;
  11108. }
  11109. #endif
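/* Usage sketch for the 64-bit forms (hypothetical helper, not an ACLE
 * intrinsic): vmax_u8 keeps the larger of each pair of corresponding lanes,
 * e.g. combining two 8-pixel rows by per-pixel maximum. */
__ai uint8x8_t __example_vmax_pixels_u8(uint8x8_t __row0, uint8x8_t __row1) {
  return vmax_u8(__row0, __row1);
}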
  11110. #ifdef __LITTLE_ENDIAN__
  11111. __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  11112. uint8x16_t __ret;
  11113. __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  11114. return __ret;
  11115. }
  11116. #else
  11117. __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  11118. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11119. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11120. uint8x16_t __ret;
  11121. __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  11122. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11123. return __ret;
  11124. }
  11125. #endif
  11126. #ifdef __LITTLE_ENDIAN__
  11127. __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  11128. uint32x4_t __ret;
  11129. __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  11130. return __ret;
  11131. }
  11132. #else
  11133. __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  11134. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11135. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11136. uint32x4_t __ret;
  11137. __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  11138. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11139. return __ret;
  11140. }
  11141. #endif
  11142. #ifdef __LITTLE_ENDIAN__
  11143. __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  11144. uint16x8_t __ret;
  11145. __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  11146. return __ret;
  11147. }
  11148. #else
  11149. __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  11150. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11151. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11152. uint16x8_t __ret;
  11153. __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  11154. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11155. return __ret;
  11156. }
  11157. #endif
  11158. #ifdef __LITTLE_ENDIAN__
  11159. __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
  11160. int8x16_t __ret;
  11161. __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  11162. return __ret;
  11163. }
  11164. #else
  11165. __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
  11166. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11167. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11168. int8x16_t __ret;
  11169. __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  11170. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11171. return __ret;
  11172. }
  11173. #endif
  11174. #ifdef __LITTLE_ENDIAN__
  11175. __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
  11176. float32x4_t __ret;
  11177. __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  11178. return __ret;
  11179. }
  11180. #else
  11181. __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
  11182. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11183. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11184. float32x4_t __ret;
  11185. __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  11186. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11187. return __ret;
  11188. }
  11189. #endif
  11190. #ifdef __LITTLE_ENDIAN__
  11191. __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
  11192. int32x4_t __ret;
  11193. __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  11194. return __ret;
  11195. }
  11196. #else
  11197. __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
  11198. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11199. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11200. int32x4_t __ret;
  11201. __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  11202. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11203. return __ret;
  11204. }
  11205. #endif
  11206. #ifdef __LITTLE_ENDIAN__
  11207. __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
  11208. int16x8_t __ret;
  11209. __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  11210. return __ret;
  11211. }
  11212. #else
  11213. __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
  11214. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11215. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11216. int16x8_t __ret;
  11217. __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  11218. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11219. return __ret;
  11220. }
  11221. #endif
  11222. #ifdef __LITTLE_ENDIAN__
  11223. __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
  11224. uint8x8_t __ret;
  11225. __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  11226. return __ret;
  11227. }
  11228. #else
  11229. __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
  11230. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11231. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11232. uint8x8_t __ret;
  11233. __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  11234. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11235. return __ret;
  11236. }
  11237. #endif
  11238. #ifdef __LITTLE_ENDIAN__
  11239. __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
  11240. uint32x2_t __ret;
  11241. __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  11242. return __ret;
  11243. }
  11244. #else
  11245. __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
  11246. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11247. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11248. uint32x2_t __ret;
  11249. __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  11250. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11251. return __ret;
  11252. }
  11253. #endif
  11254. #ifdef __LITTLE_ENDIAN__
  11255. __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
  11256. uint16x4_t __ret;
  11257. __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  11258. return __ret;
  11259. }
  11260. #else
  11261. __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
  11262. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11263. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11264. uint16x4_t __ret;
  11265. __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  11266. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11267. return __ret;
  11268. }
  11269. #endif
  11270. #ifdef __LITTLE_ENDIAN__
  11271. __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
  11272. int8x8_t __ret;
  11273. __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  11274. return __ret;
  11275. }
  11276. #else
  11277. __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
  11278. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11279. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11280. int8x8_t __ret;
  11281. __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  11282. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11283. return __ret;
  11284. }
  11285. #endif
  11286. #ifdef __LITTLE_ENDIAN__
  11287. __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
  11288. float32x2_t __ret;
  11289. __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  11290. return __ret;
  11291. }
  11292. #else
  11293. __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
  11294. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11295. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11296. float32x2_t __ret;
  11297. __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  11298. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11299. return __ret;
  11300. }
  11301. #endif
  11302. #ifdef __LITTLE_ENDIAN__
  11303. __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
  11304. int32x2_t __ret;
  11305. __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  11306. return __ret;
  11307. }
  11308. #else
  11309. __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
  11310. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11311. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11312. int32x2_t __ret;
  11313. __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  11314. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11315. return __ret;
  11316. }
  11317. #endif
  11318. #ifdef __LITTLE_ENDIAN__
  11319. __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
  11320. int16x4_t __ret;
  11321. __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  11322. return __ret;
  11323. }
  11324. #else
  11325. __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
  11326. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11327. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11328. int16x4_t __ret;
  11329. __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  11330. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11331. return __ret;
  11332. }
  11333. #endif
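/* Sketch combining vmax_s16 and vmin_s16 into a per-lane clamp (hypothetical
 * helper, not an ACLE intrinsic): each lane of __x is limited to the range
 * [__lo[i], __hi[i]]. */
__ai int16x4_t __example_vclamp_s16(int16x4_t __x, int16x4_t __lo, int16x4_t __hi) {
  return vmax_s16(__lo, vmin_s16(__x, __hi));
}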
  11334. #ifdef __LITTLE_ENDIAN__
  11335. __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  11336. uint8x16_t __ret;
  11337. __ret = __p0 + __p1 * __p2;
  11338. return __ret;
  11339. }
  11340. #else
  11341. __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  11342. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11343. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11344. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11345. uint8x16_t __ret;
  11346. __ret = __rev0 + __rev1 * __rev2;
  11347. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11348. return __ret;
  11349. }
  11350. #endif
  11351. #ifdef __LITTLE_ENDIAN__
  11352. __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  11353. uint32x4_t __ret;
  11354. __ret = __p0 + __p1 * __p2;
  11355. return __ret;
  11356. }
  11357. #else
  11358. __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  11359. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11360. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11361. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  11362. uint32x4_t __ret;
  11363. __ret = __rev0 + __rev1 * __rev2;
  11364. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11365. return __ret;
  11366. }
  11367. #endif
  11368. #ifdef __LITTLE_ENDIAN__
  11369. __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  11370. uint16x8_t __ret;
  11371. __ret = __p0 + __p1 * __p2;
  11372. return __ret;
  11373. }
  11374. #else
  11375. __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  11376. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11377. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11378. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  11379. uint16x8_t __ret;
  11380. __ret = __rev0 + __rev1 * __rev2;
  11381. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11382. return __ret;
  11383. }
  11384. #endif
  11385. #ifdef __LITTLE_ENDIAN__
  11386. __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  11387. int8x16_t __ret;
  11388. __ret = __p0 + __p1 * __p2;
  11389. return __ret;
  11390. }
  11391. #else
  11392. __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  11393. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11394. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11395. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11396. int8x16_t __ret;
  11397. __ret = __rev0 + __rev1 * __rev2;
  11398. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11399. return __ret;
  11400. }
  11401. #endif
  11402. #ifdef __LITTLE_ENDIAN__
  11403. __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  11404. float32x4_t __ret;
  11405. __ret = __p0 + __p1 * __p2;
  11406. return __ret;
  11407. }
  11408. #else
  11409. __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  11410. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11411. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11412. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  11413. float32x4_t __ret;
  11414. __ret = __rev0 + __rev1 * __rev2;
  11415. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11416. return __ret;
  11417. }
  11418. #endif
  11419. #ifdef __LITTLE_ENDIAN__
  11420. __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  11421. int32x4_t __ret;
  11422. __ret = __p0 + __p1 * __p2;
  11423. return __ret;
  11424. }
  11425. #else
  11426. __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  11427. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11428. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11429. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  11430. int32x4_t __ret;
  11431. __ret = __rev0 + __rev1 * __rev2;
  11432. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11433. return __ret;
  11434. }
  11435. #endif
  11436. #ifdef __LITTLE_ENDIAN__
  11437. __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  11438. int16x8_t __ret;
  11439. __ret = __p0 + __p1 * __p2;
  11440. return __ret;
  11441. }
  11442. #else
  11443. __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  11444. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11445. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11446. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  11447. int16x8_t __ret;
  11448. __ret = __rev0 + __rev1 * __rev2;
  11449. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11450. return __ret;
  11451. }
  11452. #endif
  11453. #ifdef __LITTLE_ENDIAN__
  11454. __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  11455. uint8x8_t __ret;
  11456. __ret = __p0 + __p1 * __p2;
  11457. return __ret;
  11458. }
  11459. #else
  11460. __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  11461. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11462. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11463. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  11464. uint8x8_t __ret;
  11465. __ret = __rev0 + __rev1 * __rev2;
  11466. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11467. return __ret;
  11468. }
  11469. #endif
  11470. #ifdef __LITTLE_ENDIAN__
  11471. __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  11472. uint32x2_t __ret;
  11473. __ret = __p0 + __p1 * __p2;
  11474. return __ret;
  11475. }
  11476. #else
  11477. __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  11478. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11479. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11480. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  11481. uint32x2_t __ret;
  11482. __ret = __rev0 + __rev1 * __rev2;
  11483. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11484. return __ret;
  11485. }
  11486. #endif
  11487. #ifdef __LITTLE_ENDIAN__
  11488. __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  11489. uint16x4_t __ret;
  11490. __ret = __p0 + __p1 * __p2;
  11491. return __ret;
  11492. }
  11493. #else
  11494. __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  11495. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11496. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11497. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  11498. uint16x4_t __ret;
  11499. __ret = __rev0 + __rev1 * __rev2;
  11500. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11501. return __ret;
  11502. }
  11503. #endif
  11504. #ifdef __LITTLE_ENDIAN__
  11505. __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  11506. int8x8_t __ret;
  11507. __ret = __p0 + __p1 * __p2;
  11508. return __ret;
  11509. }
  11510. #else
  11511. __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  11512. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11513. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11514. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  11515. int8x8_t __ret;
  11516. __ret = __rev0 + __rev1 * __rev2;
  11517. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11518. return __ret;
  11519. }
  11520. #endif
  11521. #ifdef __LITTLE_ENDIAN__
  11522. __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  11523. float32x2_t __ret;
  11524. __ret = __p0 + __p1 * __p2;
  11525. return __ret;
  11526. }
  11527. #else
  11528. __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  11529. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11530. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11531. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  11532. float32x2_t __ret;
  11533. __ret = __rev0 + __rev1 * __rev2;
  11534. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11535. return __ret;
  11536. }
  11537. #endif
  11538. #ifdef __LITTLE_ENDIAN__
  11539. __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  11540. int32x2_t __ret;
  11541. __ret = __p0 + __p1 * __p2;
  11542. return __ret;
  11543. }
  11544. #else
  11545. __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  11546. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11547. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11548. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  11549. int32x2_t __ret;
  11550. __ret = __rev0 + __rev1 * __rev2;
  11551. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11552. return __ret;
  11553. }
  11554. #endif
  11555. #ifdef __LITTLE_ENDIAN__
  11556. __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  11557. int16x4_t __ret;
  11558. __ret = __p0 + __p1 * __p2;
  11559. return __ret;
  11560. }
  11561. #else
  11562. __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  11563. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11564. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11565. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  11566. int16x4_t __ret;
  11567. __ret = __rev0 + __rev1 * __rev2;
  11568. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11569. return __ret;
  11570. }
  11571. #endif
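/* The vmla family is expressed here as plain vector arithmetic,
 * __p0 + __p1 * __p2, which the compiler may lower to a multiply-accumulate
 * instruction.  Usage sketch (hypothetical helper, not an ACLE intrinsic):
 * accumulate the element-wise product of two vectors into an accumulator. */
__ai float32x4_t __example_vmlaq_acc_f32(float32x4_t __acc, float32x4_t __x, float32x4_t __y) {
  return vmlaq_f32(__acc, __x, __y);  /* __acc + __x * __y, per lane */
}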
  11572. #ifdef __LITTLE_ENDIAN__
  11573. #define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11574. uint32x4_t __s0 = __p0; \
  11575. uint32x4_t __s1 = __p1; \
  11576. uint32x2_t __s2 = __p2; \
  11577. uint32x4_t __ret; \
  11578. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  11579. __ret; \
  11580. })
  11581. #else
  11582. #define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11583. uint32x4_t __s0 = __p0; \
  11584. uint32x4_t __s1 = __p1; \
  11585. uint32x2_t __s2 = __p2; \
  11586. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  11587. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  11588. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  11589. uint32x4_t __ret; \
  11590. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  11591. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  11592. __ret; \
  11593. })
  11594. #endif
  11595. #ifdef __LITTLE_ENDIAN__
  11596. #define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  11597. uint16x8_t __s0 = __p0; \
  11598. uint16x8_t __s1 = __p1; \
  11599. uint16x4_t __s2 = __p2; \
  11600. uint16x8_t __ret; \
  11601. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  11602. __ret; \
  11603. })
  11604. #else
  11605. #define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  11606. uint16x8_t __s0 = __p0; \
  11607. uint16x8_t __s1 = __p1; \
  11608. uint16x4_t __s2 = __p2; \
  11609. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  11610. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  11611. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  11612. uint16x8_t __ret; \
  11613. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  11614. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  11615. __ret; \
  11616. })
  11617. #endif
  11618. #ifdef __LITTLE_ENDIAN__
  11619. #define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11620. float32x4_t __s0 = __p0; \
  11621. float32x4_t __s1 = __p1; \
  11622. float32x2_t __s2 = __p2; \
  11623. float32x4_t __ret; \
  11624. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  11625. __ret; \
  11626. })
  11627. #else
  11628. #define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11629. float32x4_t __s0 = __p0; \
  11630. float32x4_t __s1 = __p1; \
  11631. float32x2_t __s2 = __p2; \
  11632. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  11633. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  11634. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  11635. float32x4_t __ret; \
  11636. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  11637. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  11638. __ret; \
  11639. })
  11640. #endif
  11641. #ifdef __LITTLE_ENDIAN__
  11642. #define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11643. int32x4_t __s0 = __p0; \
  11644. int32x4_t __s1 = __p1; \
  11645. int32x2_t __s2 = __p2; \
  11646. int32x4_t __ret; \
  11647. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  11648. __ret; \
  11649. })
  11650. #else
  11651. #define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11652. int32x4_t __s0 = __p0; \
  11653. int32x4_t __s1 = __p1; \
  11654. int32x2_t __s2 = __p2; \
  11655. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  11656. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  11657. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  11658. int32x4_t __ret; \
  11659. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  11660. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  11661. __ret; \
  11662. })
  11663. #endif
  11664. #ifdef __LITTLE_ENDIAN__
  11665. #define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  11666. int16x8_t __s0 = __p0; \
  11667. int16x8_t __s1 = __p1; \
  11668. int16x4_t __s2 = __p2; \
  11669. int16x8_t __ret; \
  11670. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  11671. __ret; \
  11672. })
  11673. #else
  11674. #define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  11675. int16x8_t __s0 = __p0; \
  11676. int16x8_t __s1 = __p1; \
  11677. int16x4_t __s2 = __p2; \
  11678. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  11679. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  11680. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  11681. int16x8_t __ret; \
  11682. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  11683. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  11684. __ret; \
  11685. })
  11686. #endif
  11687. #ifdef __LITTLE_ENDIAN__
  11688. #define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11689. uint32x2_t __s0 = __p0; \
  11690. uint32x2_t __s1 = __p1; \
  11691. uint32x2_t __s2 = __p2; \
  11692. uint32x2_t __ret; \
  11693. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  11694. __ret; \
  11695. })
  11696. #else
  11697. #define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11698. uint32x2_t __s0 = __p0; \
  11699. uint32x2_t __s1 = __p1; \
  11700. uint32x2_t __s2 = __p2; \
  11701. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  11702. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  11703. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  11704. uint32x2_t __ret; \
  11705. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  11706. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  11707. __ret; \
  11708. })
  11709. #endif
  11710. #ifdef __LITTLE_ENDIAN__
  11711. #define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  11712. uint16x4_t __s0 = __p0; \
  11713. uint16x4_t __s1 = __p1; \
  11714. uint16x4_t __s2 = __p2; \
  11715. uint16x4_t __ret; \
  11716. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  11717. __ret; \
  11718. })
  11719. #else
  11720. #define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  11721. uint16x4_t __s0 = __p0; \
  11722. uint16x4_t __s1 = __p1; \
  11723. uint16x4_t __s2 = __p2; \
  11724. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  11725. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  11726. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  11727. uint16x4_t __ret; \
  11728. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  11729. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  11730. __ret; \
  11731. })
  11732. #endif
  11733. #ifdef __LITTLE_ENDIAN__
  11734. #define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11735. float32x2_t __s0 = __p0; \
  11736. float32x2_t __s1 = __p1; \
  11737. float32x2_t __s2 = __p2; \
  11738. float32x2_t __ret; \
  11739. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  11740. __ret; \
  11741. })
  11742. #else
  11743. #define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11744. float32x2_t __s0 = __p0; \
  11745. float32x2_t __s1 = __p1; \
  11746. float32x2_t __s2 = __p2; \
  11747. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  11748. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  11749. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  11750. float32x2_t __ret; \
  11751. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  11752. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  11753. __ret; \
  11754. })
  11755. #endif
  11756. #ifdef __LITTLE_ENDIAN__
  11757. #define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11758. int32x2_t __s0 = __p0; \
  11759. int32x2_t __s1 = __p1; \
  11760. int32x2_t __s2 = __p2; \
  11761. int32x2_t __ret; \
  11762. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  11763. __ret; \
  11764. })
  11765. #else
  11766. #define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  11767. int32x2_t __s0 = __p0; \
  11768. int32x2_t __s1 = __p1; \
  11769. int32x2_t __s2 = __p2; \
  11770. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  11771. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  11772. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  11773. int32x2_t __ret; \
  11774. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  11775. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  11776. __ret; \
  11777. })
  11778. #endif
  11779. #ifdef __LITTLE_ENDIAN__
  11780. #define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  11781. int16x4_t __s0 = __p0; \
  11782. int16x4_t __s1 = __p1; \
  11783. int16x4_t __s2 = __p2; \
  11784. int16x4_t __ret; \
  11785. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  11786. __ret; \
  11787. })
  11788. #else
  11789. #define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  11790. int16x4_t __s0 = __p0; \
  11791. int16x4_t __s1 = __p1; \
  11792. int16x4_t __s2 = __p2; \
  11793. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  11794. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  11795. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  11796. int16x4_t __ret; \
  11797. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  11798. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  11799. __ret; \
  11800. })
  11801. #endif
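/* The *_lane_* forms multiply by a single lane of the last vector argument,
 * broadcast to every element via __builtin_shufflevector; the lane index must
 * be a compile-time constant.  Usage sketch (hypothetical helper, not an ACLE
 * intrinsic): accumulate __x scaled by lane 1 of __coeff. */
__ai float32x4_t __example_vmlaq_by_lane1_f32(float32x4_t __acc, float32x4_t __x, float32x2_t __coeff) {
  return vmlaq_lane_f32(__acc, __x, __coeff, 1);
}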
  11802. #ifdef __LITTLE_ENDIAN__
  11803. __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
  11804. uint32x4_t __ret;
  11805. __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
  11806. return __ret;
  11807. }
  11808. #else
  11809. __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
  11810. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11811. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11812. uint32x4_t __ret;
  11813. __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
  11814. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11815. return __ret;
  11816. }
  11817. #endif
  11818. #ifdef __LITTLE_ENDIAN__
  11819. __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
  11820. uint16x8_t __ret;
  11821. __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  11822. return __ret;
  11823. }
  11824. #else
  11825. __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
  11826. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11827. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11828. uint16x8_t __ret;
  11829. __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  11830. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11831. return __ret;
  11832. }
  11833. #endif
  11834. #ifdef __LITTLE_ENDIAN__
  11835. __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  11836. float32x4_t __ret;
  11837. __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
  11838. return __ret;
  11839. }
  11840. #else
  11841. __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  11842. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11843. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11844. float32x4_t __ret;
  11845. __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
  11846. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11847. return __ret;
  11848. }
  11849. #endif
  11850. #ifdef __LITTLE_ENDIAN__
  11851. __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
  11852. int32x4_t __ret;
  11853. __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
  11854. return __ret;
  11855. }
  11856. #else
  11857. __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
  11858. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11859. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11860. int32x4_t __ret;
  11861. __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
  11862. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11863. return __ret;
  11864. }
  11865. #endif
  11866. #ifdef __LITTLE_ENDIAN__
  11867. __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
  11868. int16x8_t __ret;
  11869. __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  11870. return __ret;
  11871. }
  11872. #else
  11873. __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
  11874. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  11875. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  11876. int16x8_t __ret;
  11877. __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  11878. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  11879. return __ret;
  11880. }
  11881. #endif
  11882. #ifdef __LITTLE_ENDIAN__
  11883. __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  11884. uint32x2_t __ret;
  11885. __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
  11886. return __ret;
  11887. }
  11888. #else
  11889. __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  11890. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11891. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11892. uint32x2_t __ret;
  11893. __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
  11894. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11895. return __ret;
  11896. }
  11897. #endif
  11898. #ifdef __LITTLE_ENDIAN__
  11899. __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  11900. uint16x4_t __ret;
  11901. __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
  11902. return __ret;
  11903. }
  11904. #else
  11905. __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  11906. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11907. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11908. uint16x4_t __ret;
  11909. __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
  11910. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11911. return __ret;
  11912. }
  11913. #endif
  11914. #ifdef __LITTLE_ENDIAN__
  11915. __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  11916. float32x2_t __ret;
  11917. __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
  11918. return __ret;
  11919. }
  11920. #else
  11921. __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  11922. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11923. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11924. float32x2_t __ret;
  11925. __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
  11926. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11927. return __ret;
  11928. }
  11929. #endif
  11930. #ifdef __LITTLE_ENDIAN__
  11931. __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
  11932. int32x2_t __ret;
  11933. __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
  11934. return __ret;
  11935. }
  11936. #else
  11937. __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
  11938. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  11939. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  11940. int32x2_t __ret;
  11941. __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
  11942. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  11943. return __ret;
  11944. }
  11945. #endif
  11946. #ifdef __LITTLE_ENDIAN__
  11947. __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
  11948. int16x4_t __ret;
  11949. __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
  11950. return __ret;
  11951. }
  11952. #else
  11953. __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
  11954. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11955. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11956. int16x4_t __ret;
  11957. __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
  11958. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11959. return __ret;
  11960. }
  11961. #endif
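/* Illustrative sketch (editorial addition, not part of the generated header):
 * the vmlaq_n_* and vmla_n_* forms above compute ret = p0 + p1 * scalar,
 * broadcasting the scalar across every lane:
 *
 *   float32x4_t acc = vmovq_n_f32(1.0f);         // {1, 1, 1, 1}
 *   float32x4_t x   = vmovq_n_f32(2.0f);         // {2, 2, 2, 2}
 *   acc = vmlaq_n_f32(acc, x, 3.0f);             // each lane: 1 + 2*3 = 7
 */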
  11962. #ifdef __LITTLE_ENDIAN__
  11963. __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  11964. uint8x16_t __ret;
  11965. __ret = __p0 - __p1 * __p2;
  11966. return __ret;
  11967. }
  11968. #else
  11969. __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  11970. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11971. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11972. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11973. uint8x16_t __ret;
  11974. __ret = __rev0 - __rev1 * __rev2;
  11975. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  11976. return __ret;
  11977. }
  11978. #endif
  11979. #ifdef __LITTLE_ENDIAN__
  11980. __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  11981. uint32x4_t __ret;
  11982. __ret = __p0 - __p1 * __p2;
  11983. return __ret;
  11984. }
  11985. #else
  11986. __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  11987. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  11988. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  11989. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  11990. uint32x4_t __ret;
  11991. __ret = __rev0 - __rev1 * __rev2;
  11992. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  11993. return __ret;
  11994. }
  11995. #endif
  11996. #ifdef __LITTLE_ENDIAN__
  11997. __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  11998. uint16x8_t __ret;
  11999. __ret = __p0 - __p1 * __p2;
  12000. return __ret;
  12001. }
  12002. #else
  12003. __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  12004. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  12005. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  12006. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  12007. uint16x8_t __ret;
  12008. __ret = __rev0 - __rev1 * __rev2;
  12009. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12010. return __ret;
  12011. }
  12012. #endif
  12013. #ifdef __LITTLE_ENDIAN__
  12014. __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  12015. int8x16_t __ret;
  12016. __ret = __p0 - __p1 * __p2;
  12017. return __ret;
  12018. }
  12019. #else
  12020. __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  12021. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  12022. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  12023. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  12024. int8x16_t __ret;
  12025. __ret = __rev0 - __rev1 * __rev2;
  12026. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  12027. return __ret;
  12028. }
  12029. #endif
  12030. #ifdef __LITTLE_ENDIAN__
  12031. __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  12032. float32x4_t __ret;
  12033. __ret = __p0 - __p1 * __p2;
  12034. return __ret;
  12035. }
  12036. #else
  12037. __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  12038. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12039. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12040. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  12041. float32x4_t __ret;
  12042. __ret = __rev0 - __rev1 * __rev2;
  12043. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12044. return __ret;
  12045. }
  12046. #endif
  12047. #ifdef __LITTLE_ENDIAN__
  12048. __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  12049. int32x4_t __ret;
  12050. __ret = __p0 - __p1 * __p2;
  12051. return __ret;
  12052. }
  12053. #else
  12054. __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  12055. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12056. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12057. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  12058. int32x4_t __ret;
  12059. __ret = __rev0 - __rev1 * __rev2;
  12060. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12061. return __ret;
  12062. }
  12063. #endif
  12064. #ifdef __LITTLE_ENDIAN__
  12065. __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  12066. int16x8_t __ret;
  12067. __ret = __p0 - __p1 * __p2;
  12068. return __ret;
  12069. }
  12070. #else
  12071. __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  12072. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  12073. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  12074. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  12075. int16x8_t __ret;
  12076. __ret = __rev0 - __rev1 * __rev2;
  12077. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12078. return __ret;
  12079. }
  12080. #endif
  12081. #ifdef __LITTLE_ENDIAN__
  12082. __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  12083. uint8x8_t __ret;
  12084. __ret = __p0 - __p1 * __p2;
  12085. return __ret;
  12086. }
  12087. #else
  12088. __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  12089. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  12090. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  12091. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  12092. uint8x8_t __ret;
  12093. __ret = __rev0 - __rev1 * __rev2;
  12094. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12095. return __ret;
  12096. }
  12097. #endif
  12098. #ifdef __LITTLE_ENDIAN__
  12099. __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  12100. uint32x2_t __ret;
  12101. __ret = __p0 - __p1 * __p2;
  12102. return __ret;
  12103. }
  12104. #else
  12105. __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  12106. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  12107. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  12108. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  12109. uint32x2_t __ret;
  12110. __ret = __rev0 - __rev1 * __rev2;
  12111. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12112. return __ret;
  12113. }
  12114. #endif
  12115. #ifdef __LITTLE_ENDIAN__
  12116. __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  12117. uint16x4_t __ret;
  12118. __ret = __p0 - __p1 * __p2;
  12119. return __ret;
  12120. }
  12121. #else
  12122. __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  12123. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12124. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12125. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  12126. uint16x4_t __ret;
  12127. __ret = __rev0 - __rev1 * __rev2;
  12128. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12129. return __ret;
  12130. }
  12131. #endif
  12132. #ifdef __LITTLE_ENDIAN__
  12133. __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  12134. int8x8_t __ret;
  12135. __ret = __p0 - __p1 * __p2;
  12136. return __ret;
  12137. }
  12138. #else
  12139. __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  12140. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  12141. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  12142. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  12143. int8x8_t __ret;
  12144. __ret = __rev0 - __rev1 * __rev2;
  12145. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12146. return __ret;
  12147. }
  12148. #endif
  12149. #ifdef __LITTLE_ENDIAN__
  12150. __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  12151. float32x2_t __ret;
  12152. __ret = __p0 - __p1 * __p2;
  12153. return __ret;
  12154. }
  12155. #else
  12156. __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  12157. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  12158. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  12159. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  12160. float32x2_t __ret;
  12161. __ret = __rev0 - __rev1 * __rev2;
  12162. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12163. return __ret;
  12164. }
  12165. #endif
  12166. #ifdef __LITTLE_ENDIAN__
  12167. __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  12168. int32x2_t __ret;
  12169. __ret = __p0 - __p1 * __p2;
  12170. return __ret;
  12171. }
  12172. #else
  12173. __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  12174. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  12175. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  12176. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  12177. int32x2_t __ret;
  12178. __ret = __rev0 - __rev1 * __rev2;
  12179. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12180. return __ret;
  12181. }
  12182. #endif
  12183. #ifdef __LITTLE_ENDIAN__
  12184. __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  12185. int16x4_t __ret;
  12186. __ret = __p0 - __p1 * __p2;
  12187. return __ret;
  12188. }
  12189. #else
  12190. __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  12191. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12192. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12193. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  12194. int16x4_t __ret;
  12195. __ret = __rev0 - __rev1 * __rev2;
  12196. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12197. return __ret;
  12198. }
  12199. #endif
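/* Illustrative sketch (editorial addition): vmlsq_* and vmls_* above are the
 * multiply-subtract counterparts of vmla, computing ret = p0 - p1 * p2 lane by
 * lane:
 *
 *   float32x4_t a = vmovq_n_f32(10.0f);
 *   float32x4_t b = vmovq_n_f32(2.0f);
 *   float32x4_t c = vmovq_n_f32(3.0f);
 *   float32x4_t r = vmlsq_f32(a, b, c);          // each lane: 10 - 2*3 = 4
 */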
  12200. #ifdef __LITTLE_ENDIAN__
  12201. #define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12202. uint32x4_t __s0 = __p0; \
  12203. uint32x4_t __s1 = __p1; \
  12204. uint32x2_t __s2 = __p2; \
  12205. uint32x4_t __ret; \
  12206. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  12207. __ret; \
  12208. })
  12209. #else
  12210. #define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12211. uint32x4_t __s0 = __p0; \
  12212. uint32x4_t __s1 = __p1; \
  12213. uint32x2_t __s2 = __p2; \
  12214. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  12215. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  12216. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  12217. uint32x4_t __ret; \
  12218. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  12219. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  12220. __ret; \
  12221. })
  12222. #endif
  12223. #ifdef __LITTLE_ENDIAN__
  12224. #define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  12225. uint16x8_t __s0 = __p0; \
  12226. uint16x8_t __s1 = __p1; \
  12227. uint16x4_t __s2 = __p2; \
  12228. uint16x8_t __ret; \
  12229. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  12230. __ret; \
  12231. })
  12232. #else
  12233. #define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  12234. uint16x8_t __s0 = __p0; \
  12235. uint16x8_t __s1 = __p1; \
  12236. uint16x4_t __s2 = __p2; \
  12237. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  12238. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  12239. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  12240. uint16x8_t __ret; \
  12241. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  12242. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  12243. __ret; \
  12244. })
  12245. #endif
  12246. #ifdef __LITTLE_ENDIAN__
  12247. #define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12248. float32x4_t __s0 = __p0; \
  12249. float32x4_t __s1 = __p1; \
  12250. float32x2_t __s2 = __p2; \
  12251. float32x4_t __ret; \
  12252. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  12253. __ret; \
  12254. })
  12255. #else
  12256. #define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12257. float32x4_t __s0 = __p0; \
  12258. float32x4_t __s1 = __p1; \
  12259. float32x2_t __s2 = __p2; \
  12260. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  12261. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  12262. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  12263. float32x4_t __ret; \
  12264. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  12265. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  12266. __ret; \
  12267. })
  12268. #endif
  12269. #ifdef __LITTLE_ENDIAN__
  12270. #define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12271. int32x4_t __s0 = __p0; \
  12272. int32x4_t __s1 = __p1; \
  12273. int32x2_t __s2 = __p2; \
  12274. int32x4_t __ret; \
  12275. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  12276. __ret; \
  12277. })
  12278. #else
  12279. #define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12280. int32x4_t __s0 = __p0; \
  12281. int32x4_t __s1 = __p1; \
  12282. int32x2_t __s2 = __p2; \
  12283. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  12284. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  12285. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  12286. int32x4_t __ret; \
  12287. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  12288. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  12289. __ret; \
  12290. })
  12291. #endif
  12292. #ifdef __LITTLE_ENDIAN__
  12293. #define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  12294. int16x8_t __s0 = __p0; \
  12295. int16x8_t __s1 = __p1; \
  12296. int16x4_t __s2 = __p2; \
  12297. int16x8_t __ret; \
  12298. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  12299. __ret; \
  12300. })
  12301. #else
  12302. #define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  12303. int16x8_t __s0 = __p0; \
  12304. int16x8_t __s1 = __p1; \
  12305. int16x4_t __s2 = __p2; \
  12306. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  12307. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  12308. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  12309. int16x8_t __ret; \
  12310. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  12311. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  12312. __ret; \
  12313. })
  12314. #endif
  12315. #ifdef __LITTLE_ENDIAN__
  12316. #define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12317. uint32x2_t __s0 = __p0; \
  12318. uint32x2_t __s1 = __p1; \
  12319. uint32x2_t __s2 = __p2; \
  12320. uint32x2_t __ret; \
  12321. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  12322. __ret; \
  12323. })
  12324. #else
  12325. #define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12326. uint32x2_t __s0 = __p0; \
  12327. uint32x2_t __s1 = __p1; \
  12328. uint32x2_t __s2 = __p2; \
  12329. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  12330. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  12331. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  12332. uint32x2_t __ret; \
  12333. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  12334. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  12335. __ret; \
  12336. })
  12337. #endif
  12338. #ifdef __LITTLE_ENDIAN__
  12339. #define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  12340. uint16x4_t __s0 = __p0; \
  12341. uint16x4_t __s1 = __p1; \
  12342. uint16x4_t __s2 = __p2; \
  12343. uint16x4_t __ret; \
  12344. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  12345. __ret; \
  12346. })
  12347. #else
  12348. #define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  12349. uint16x4_t __s0 = __p0; \
  12350. uint16x4_t __s1 = __p1; \
  12351. uint16x4_t __s2 = __p2; \
  12352. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  12353. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  12354. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  12355. uint16x4_t __ret; \
  12356. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  12357. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  12358. __ret; \
  12359. })
  12360. #endif
  12361. #ifdef __LITTLE_ENDIAN__
  12362. #define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12363. float32x2_t __s0 = __p0; \
  12364. float32x2_t __s1 = __p1; \
  12365. float32x2_t __s2 = __p2; \
  12366. float32x2_t __ret; \
  12367. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  12368. __ret; \
  12369. })
  12370. #else
  12371. #define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12372. float32x2_t __s0 = __p0; \
  12373. float32x2_t __s1 = __p1; \
  12374. float32x2_t __s2 = __p2; \
  12375. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  12376. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  12377. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  12378. float32x2_t __ret; \
  12379. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  12380. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  12381. __ret; \
  12382. })
  12383. #endif
  12384. #ifdef __LITTLE_ENDIAN__
  12385. #define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12386. int32x2_t __s0 = __p0; \
  12387. int32x2_t __s1 = __p1; \
  12388. int32x2_t __s2 = __p2; \
  12389. int32x2_t __ret; \
  12390. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  12391. __ret; \
  12392. })
  12393. #else
  12394. #define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  12395. int32x2_t __s0 = __p0; \
  12396. int32x2_t __s1 = __p1; \
  12397. int32x2_t __s2 = __p2; \
  12398. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  12399. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  12400. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  12401. int32x2_t __ret; \
  12402. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  12403. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  12404. __ret; \
  12405. })
  12406. #endif
  12407. #ifdef __LITTLE_ENDIAN__
  12408. #define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  12409. int16x4_t __s0 = __p0; \
  12410. int16x4_t __s1 = __p1; \
  12411. int16x4_t __s2 = __p2; \
  12412. int16x4_t __ret; \
  12413. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  12414. __ret; \
  12415. })
  12416. #else
  12417. #define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  12418. int16x4_t __s0 = __p0; \
  12419. int16x4_t __s1 = __p1; \
  12420. int16x4_t __s2 = __p2; \
  12421. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  12422. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  12423. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  12424. int16x4_t __ret; \
  12425. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  12426. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  12427. __ret; \
  12428. })
  12429. #endif
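/* Illustrative sketch (editorial addition): the _lane forms subtract the
 * product of a vector and one broadcast lane of another vector,
 * ret = p0 - p1 * dup(p2[lane]); the lane index must be a compile-time
 * constant:
 *
 *   float32x4_t a = vmovq_n_f32(5.0f);
 *   float32x4_t b = vmovq_n_f32(2.0f);
 *   float32x2_t c = (float32x2_t){1.0f, 4.0f};
 *   float32x4_t r = vmlsq_lane_f32(a, b, c, 1);  // each lane: 5 - 2*4 = -3
 */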
  12430. #ifdef __LITTLE_ENDIAN__
  12431. __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
  12432. uint32x4_t __ret;
  12433. __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
  12434. return __ret;
  12435. }
  12436. #else
  12437. __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
  12438. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12439. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12440. uint32x4_t __ret;
  12441. __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
  12442. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12443. return __ret;
  12444. }
  12445. #endif
  12446. #ifdef __LITTLE_ENDIAN__
  12447. __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
  12448. uint16x8_t __ret;
  12449. __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  12450. return __ret;
  12451. }
  12452. #else
  12453. __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
  12454. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  12455. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  12456. uint16x8_t __ret;
  12457. __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  12458. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12459. return __ret;
  12460. }
  12461. #endif
  12462. #ifdef __LITTLE_ENDIAN__
  12463. __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  12464. float32x4_t __ret;
  12465. __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
  12466. return __ret;
  12467. }
  12468. #else
  12469. __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  12470. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12471. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12472. float32x4_t __ret;
  12473. __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
  12474. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12475. return __ret;
  12476. }
  12477. #endif
  12478. #ifdef __LITTLE_ENDIAN__
  12479. __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
  12480. int32x4_t __ret;
  12481. __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
  12482. return __ret;
  12483. }
  12484. #else
  12485. __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
  12486. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12487. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12488. int32x4_t __ret;
  12489. __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
  12490. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12491. return __ret;
  12492. }
  12493. #endif
  12494. #ifdef __LITTLE_ENDIAN__
  12495. __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
  12496. int16x8_t __ret;
  12497. __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  12498. return __ret;
  12499. }
  12500. #else
  12501. __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
  12502. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  12503. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  12504. int16x8_t __ret;
  12505. __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
  12506. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12507. return __ret;
  12508. }
  12509. #endif
  12510. #ifdef __LITTLE_ENDIAN__
  12511. __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  12512. uint32x2_t __ret;
  12513. __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
  12514. return __ret;
  12515. }
  12516. #else
  12517. __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  12518. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  12519. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  12520. uint32x2_t __ret;
  12521. __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
  12522. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12523. return __ret;
  12524. }
  12525. #endif
  12526. #ifdef __LITTLE_ENDIAN__
  12527. __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  12528. uint16x4_t __ret;
  12529. __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
  12530. return __ret;
  12531. }
  12532. #else
  12533. __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  12534. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12535. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12536. uint16x4_t __ret;
  12537. __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
  12538. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12539. return __ret;
  12540. }
  12541. #endif
  12542. #ifdef __LITTLE_ENDIAN__
  12543. __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  12544. float32x2_t __ret;
  12545. __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
  12546. return __ret;
  12547. }
  12548. #else
  12549. __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  12550. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  12551. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  12552. float32x2_t __ret;
  12553. __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
  12554. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12555. return __ret;
  12556. }
  12557. #endif
  12558. #ifdef __LITTLE_ENDIAN__
  12559. __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
  12560. int32x2_t __ret;
  12561. __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
  12562. return __ret;
  12563. }
  12564. #else
  12565. __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
  12566. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  12567. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  12568. int32x2_t __ret;
  12569. __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
  12570. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12571. return __ret;
  12572. }
  12573. #endif
  12574. #ifdef __LITTLE_ENDIAN__
  12575. __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
  12576. int16x4_t __ret;
  12577. __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
  12578. return __ret;
  12579. }
  12580. #else
  12581. __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
  12582. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12583. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  12584. int16x4_t __ret;
  12585. __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
  12586. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12587. return __ret;
  12588. }
  12589. #endif
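/* Illustrative sketch (editorial addition): the vmlsq_n_* and vmls_n_* forms
 * above broadcast a scalar multiplier, ret = p0 - p1 * scalar:
 *
 *   uint32x2_t acc = vmov_n_u32(100u);           // {100, 100}
 *   uint32x2_t x   = vmov_n_u32(7u);             // {7, 7}
 *   acc = vmls_n_u32(acc, x, 3u);                // each lane: 100 - 7*3 = 79
 */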
  12590. #ifdef __LITTLE_ENDIAN__
  12591. __ai poly8x8_t vmov_n_p8(poly8_t __p0) {
  12592. poly8x8_t __ret;
  12593. __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12594. return __ret;
  12595. }
  12596. #else
  12597. __ai poly8x8_t vmov_n_p8(poly8_t __p0) {
  12598. poly8x8_t __ret;
  12599. __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12600. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12601. return __ret;
  12602. }
  12603. #endif
  12604. #ifdef __LITTLE_ENDIAN__
  12605. __ai poly16x4_t vmov_n_p16(poly16_t __p0) {
  12606. poly16x4_t __ret;
  12607. __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
  12608. return __ret;
  12609. }
  12610. #else
  12611. __ai poly16x4_t vmov_n_p16(poly16_t __p0) {
  12612. poly16x4_t __ret;
  12613. __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
  12614. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12615. return __ret;
  12616. }
  12617. #endif
  12618. #ifdef __LITTLE_ENDIAN__
  12619. __ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
  12620. poly8x16_t __ret;
  12621. __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12622. return __ret;
  12623. }
  12624. #else
  12625. __ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
  12626. poly8x16_t __ret;
  12627. __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12628. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  12629. return __ret;
  12630. }
  12631. #endif
  12632. #ifdef __LITTLE_ENDIAN__
  12633. __ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
  12634. poly16x8_t __ret;
  12635. __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12636. return __ret;
  12637. }
  12638. #else
  12639. __ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
  12640. poly16x8_t __ret;
  12641. __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12642. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12643. return __ret;
  12644. }
  12645. #endif
  12646. #ifdef __LITTLE_ENDIAN__
  12647. __ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
  12648. uint8x16_t __ret;
  12649. __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12650. return __ret;
  12651. }
  12652. #else
  12653. __ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
  12654. uint8x16_t __ret;
  12655. __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12656. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  12657. return __ret;
  12658. }
  12659. #endif
  12660. #ifdef __LITTLE_ENDIAN__
  12661. __ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
  12662. uint32x4_t __ret;
  12663. __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
  12664. return __ret;
  12665. }
  12666. #else
  12667. __ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
  12668. uint32x4_t __ret;
  12669. __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
  12670. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12671. return __ret;
  12672. }
  12673. #endif
  12674. #ifdef __LITTLE_ENDIAN__
  12675. __ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
  12676. uint64x2_t __ret;
  12677. __ret = (uint64x2_t) {__p0, __p0};
  12678. return __ret;
  12679. }
  12680. #else
  12681. __ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
  12682. uint64x2_t __ret;
  12683. __ret = (uint64x2_t) {__p0, __p0};
  12684. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12685. return __ret;
  12686. }
  12687. #endif
  12688. #ifdef __LITTLE_ENDIAN__
  12689. __ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
  12690. uint16x8_t __ret;
  12691. __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12692. return __ret;
  12693. }
  12694. #else
  12695. __ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
  12696. uint16x8_t __ret;
  12697. __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12698. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12699. return __ret;
  12700. }
  12701. #endif
  12702. #ifdef __LITTLE_ENDIAN__
  12703. __ai int8x16_t vmovq_n_s8(int8_t __p0) {
  12704. int8x16_t __ret;
  12705. __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12706. return __ret;
  12707. }
  12708. #else
  12709. __ai int8x16_t vmovq_n_s8(int8_t __p0) {
  12710. int8x16_t __ret;
  12711. __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12712. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  12713. return __ret;
  12714. }
  12715. #endif
  12716. #ifdef __LITTLE_ENDIAN__
  12717. __ai float32x4_t vmovq_n_f32(float32_t __p0) {
  12718. float32x4_t __ret;
  12719. __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
  12720. return __ret;
  12721. }
  12722. #else
  12723. __ai float32x4_t vmovq_n_f32(float32_t __p0) {
  12724. float32x4_t __ret;
  12725. __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
  12726. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12727. return __ret;
  12728. }
  12729. #endif
  12730. #ifdef __LITTLE_ENDIAN__
  12731. #define vmovq_n_f16(__p0) __extension__ ({ \
  12732. float16_t __s0 = __p0; \
  12733. float16x8_t __ret; \
  12734. __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
  12735. __ret; \
  12736. })
  12737. #else
  12738. #define vmovq_n_f16(__p0) __extension__ ({ \
  12739. float16_t __s0 = __p0; \
  12740. float16x8_t __ret; \
  12741. __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
  12742. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  12743. __ret; \
  12744. })
  12745. #endif
  12746. #ifdef __LITTLE_ENDIAN__
  12747. __ai int32x4_t vmovq_n_s32(int32_t __p0) {
  12748. int32x4_t __ret;
  12749. __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
  12750. return __ret;
  12751. }
  12752. #else
  12753. __ai int32x4_t vmovq_n_s32(int32_t __p0) {
  12754. int32x4_t __ret;
  12755. __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
  12756. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12757. return __ret;
  12758. }
  12759. #endif
  12760. #ifdef __LITTLE_ENDIAN__
  12761. __ai int64x2_t vmovq_n_s64(int64_t __p0) {
  12762. int64x2_t __ret;
  12763. __ret = (int64x2_t) {__p0, __p0};
  12764. return __ret;
  12765. }
  12766. #else
  12767. __ai int64x2_t vmovq_n_s64(int64_t __p0) {
  12768. int64x2_t __ret;
  12769. __ret = (int64x2_t) {__p0, __p0};
  12770. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12771. return __ret;
  12772. }
  12773. #endif
  12774. #ifdef __LITTLE_ENDIAN__
  12775. __ai int16x8_t vmovq_n_s16(int16_t __p0) {
  12776. int16x8_t __ret;
  12777. __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12778. return __ret;
  12779. }
  12780. #else
  12781. __ai int16x8_t vmovq_n_s16(int16_t __p0) {
  12782. int16x8_t __ret;
  12783. __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12784. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12785. return __ret;
  12786. }
  12787. #endif
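/* Illustrative sketch (editorial addition): the vmovq_n_* forms above
 * broadcast one scalar into every lane of a 128-bit vector, for example:
 *
 *   uint8x16_t all_ff = vmovq_n_u8(0xFF);        // 16 lanes of 0xFF
 *   int16x8_t  threes = vmovq_n_s16(3);          // 8 lanes of 3
 */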
  12788. #ifdef __LITTLE_ENDIAN__
  12789. __ai uint8x8_t vmov_n_u8(uint8_t __p0) {
  12790. uint8x8_t __ret;
  12791. __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12792. return __ret;
  12793. }
  12794. #else
  12795. __ai uint8x8_t vmov_n_u8(uint8_t __p0) {
  12796. uint8x8_t __ret;
  12797. __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12798. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12799. return __ret;
  12800. }
  12801. #endif
  12802. #ifdef __LITTLE_ENDIAN__
  12803. __ai uint32x2_t vmov_n_u32(uint32_t __p0) {
  12804. uint32x2_t __ret;
  12805. __ret = (uint32x2_t) {__p0, __p0};
  12806. return __ret;
  12807. }
  12808. #else
  12809. __ai uint32x2_t vmov_n_u32(uint32_t __p0) {
  12810. uint32x2_t __ret;
  12811. __ret = (uint32x2_t) {__p0, __p0};
  12812. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12813. return __ret;
  12814. }
  12815. #endif
  12816. #ifdef __LITTLE_ENDIAN__
  12817. __ai uint64x1_t vmov_n_u64(uint64_t __p0) {
  12818. uint64x1_t __ret;
  12819. __ret = (uint64x1_t) {__p0};
  12820. return __ret;
  12821. }
  12822. #else
  12823. __ai uint64x1_t vmov_n_u64(uint64_t __p0) {
  12824. uint64x1_t __ret;
  12825. __ret = (uint64x1_t) {__p0};
  12826. return __ret;
  12827. }
  12828. #endif
  12829. #ifdef __LITTLE_ENDIAN__
  12830. __ai uint16x4_t vmov_n_u16(uint16_t __p0) {
  12831. uint16x4_t __ret;
  12832. __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
  12833. return __ret;
  12834. }
  12835. #else
  12836. __ai uint16x4_t vmov_n_u16(uint16_t __p0) {
  12837. uint16x4_t __ret;
  12838. __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
  12839. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12840. return __ret;
  12841. }
  12842. #endif
  12843. #ifdef __LITTLE_ENDIAN__
  12844. __ai int8x8_t vmov_n_s8(int8_t __p0) {
  12845. int8x8_t __ret;
  12846. __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12847. return __ret;
  12848. }
  12849. #else
  12850. __ai int8x8_t vmov_n_s8(int8_t __p0) {
  12851. int8x8_t __ret;
  12852. __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
  12853. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12854. return __ret;
  12855. }
  12856. #endif
  12857. #ifdef __LITTLE_ENDIAN__
  12858. __ai float32x2_t vmov_n_f32(float32_t __p0) {
  12859. float32x2_t __ret;
  12860. __ret = (float32x2_t) {__p0, __p0};
  12861. return __ret;
  12862. }
  12863. #else
  12864. __ai float32x2_t vmov_n_f32(float32_t __p0) {
  12865. float32x2_t __ret;
  12866. __ret = (float32x2_t) {__p0, __p0};
  12867. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12868. return __ret;
  12869. }
  12870. #endif
  12871. #ifdef __LITTLE_ENDIAN__
  12872. #define vmov_n_f16(__p0) __extension__ ({ \
  12873. float16_t __s0 = __p0; \
  12874. float16x4_t __ret; \
  12875. __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
  12876. __ret; \
  12877. })
  12878. #else
  12879. #define vmov_n_f16(__p0) __extension__ ({ \
  12880. float16_t __s0 = __p0; \
  12881. float16x4_t __ret; \
  12882. __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
  12883. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  12884. __ret; \
  12885. })
  12886. #endif
  12887. #ifdef __LITTLE_ENDIAN__
  12888. __ai int32x2_t vmov_n_s32(int32_t __p0) {
  12889. int32x2_t __ret;
  12890. __ret = (int32x2_t) {__p0, __p0};
  12891. return __ret;
  12892. }
  12893. #else
  12894. __ai int32x2_t vmov_n_s32(int32_t __p0) {
  12895. int32x2_t __ret;
  12896. __ret = (int32x2_t) {__p0, __p0};
  12897. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12898. return __ret;
  12899. }
  12900. #endif
  12901. #ifdef __LITTLE_ENDIAN__
  12902. __ai int64x1_t vmov_n_s64(int64_t __p0) {
  12903. int64x1_t __ret;
  12904. __ret = (int64x1_t) {__p0};
  12905. return __ret;
  12906. }
  12907. #else
  12908. __ai int64x1_t vmov_n_s64(int64_t __p0) {
  12909. int64x1_t __ret;
  12910. __ret = (int64x1_t) {__p0};
  12911. return __ret;
  12912. }
  12913. #endif
  12914. #ifdef __LITTLE_ENDIAN__
  12915. __ai int16x4_t vmov_n_s16(int16_t __p0) {
  12916. int16x4_t __ret;
  12917. __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
  12918. return __ret;
  12919. }
  12920. #else
  12921. __ai int16x4_t vmov_n_s16(int16_t __p0) {
  12922. int16x4_t __ret;
  12923. __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
  12924. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12925. return __ret;
  12926. }
  12927. #endif
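/* Illustrative sketch (editorial addition): vmov_n_* does the same broadcast
 * into a 64-bit vector (essentially the same operation as vdup_n_*).  Note
 * that the single-lane 64-bit element forms (vmov_n_u64 / vmov_n_s64) have
 * identical little- and big-endian bodies because there is only one lane to
 * reverse:
 *
 *   float32x2_t half = vmov_n_f32(0.5f);         // {0.5f, 0.5f}
 */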
  12928. #ifdef __LITTLE_ENDIAN__
  12929. __ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
  12930. uint16x8_t __ret;
  12931. __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
  12932. return __ret;
  12933. }
  12934. #else
  12935. __ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
  12936. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  12937. uint16x8_t __ret;
  12938. __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
  12939. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  12940. return __ret;
  12941. }
  12942. __ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
  12943. uint16x8_t __ret;
  12944. __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
  12945. return __ret;
  12946. }
  12947. #endif
  12948. #ifdef __LITTLE_ENDIAN__
  12949. __ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
  12950. uint64x2_t __ret;
  12951. __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
  12952. return __ret;
  12953. }
  12954. #else
  12955. __ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
  12956. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  12957. uint64x2_t __ret;
  12958. __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
  12959. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  12960. return __ret;
  12961. }
  12962. __ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
  12963. uint64x2_t __ret;
  12964. __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
  12965. return __ret;
  12966. }
  12967. #endif
  12968. #ifdef __LITTLE_ENDIAN__
  12969. __ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
  12970. uint32x4_t __ret;
  12971. __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
  12972. return __ret;
  12973. }
  12974. #else
  12975. __ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
  12976. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  12977. uint32x4_t __ret;
  12978. __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
  12979. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  12980. return __ret;
  12981. }
  12982. __ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
  12983. uint32x4_t __ret;
  12984. __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
  12985. return __ret;
  12986. }
  12987. #endif
  12988. #ifdef __LITTLE_ENDIAN__
  12989. __ai int16x8_t vmovl_s8(int8x8_t __p0) {
  12990. int16x8_t __ret;
  12991. __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
  12992. return __ret;
  12993. }
  12994. #else
  12995. __ai int16x8_t vmovl_s8(int8x8_t __p0) {
  12996. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  12997. int16x8_t __ret;
  12998. __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
  12999. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13000. return __ret;
  13001. }
  13002. __ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
  13003. int16x8_t __ret;
  13004. __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
  13005. return __ret;
  13006. }
  13007. #endif
  13008. #ifdef __LITTLE_ENDIAN__
  13009. __ai int64x2_t vmovl_s32(int32x2_t __p0) {
  13010. int64x2_t __ret;
  13011. __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
  13012. return __ret;
  13013. }
  13014. #else
  13015. __ai int64x2_t vmovl_s32(int32x2_t __p0) {
  13016. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13017. int64x2_t __ret;
  13018. __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
  13019. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13020. return __ret;
  13021. }
  13022. __ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
  13023. int64x2_t __ret;
  13024. __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
  13025. return __ret;
  13026. }
  13027. #endif
  13028. #ifdef __LITTLE_ENDIAN__
  13029. __ai int32x4_t vmovl_s16(int16x4_t __p0) {
  13030. int32x4_t __ret;
  13031. __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
  13032. return __ret;
  13033. }
  13034. #else
  13035. __ai int32x4_t vmovl_s16(int16x4_t __p0) {
  13036. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13037. int32x4_t __ret;
  13038. __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
  13039. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13040. return __ret;
  13041. }
  13042. __ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
  13043. int32x4_t __ret;
  13044. __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
  13045. return __ret;
  13046. }
  13047. #endif
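/* Illustrative sketch (editorial addition): vmovl_* widens each lane to twice
 * its width, zero-extending the unsigned forms and sign-extending the signed
 * ones.  The trailing integer passed to __builtin_neon_vmovl_v is an internal
 * element-type code, and the __noswap_ variants perform the operation without
 * any lane reversal so that other big-endian wrappers can call them without
 * reversing twice:
 *
 *   uint8x8_t  bytes = vmov_n_u8(200);           // 8 lanes of 200
 *   uint16x8_t wide  = vmovl_u8(bytes);          // 8 lanes of 200, now 16 bits each
 */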
  13048. #ifdef __LITTLE_ENDIAN__
  13049. __ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
  13050. uint16x4_t __ret;
  13051. __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
  13052. return __ret;
  13053. }
  13054. #else
  13055. __ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
  13056. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13057. uint16x4_t __ret;
  13058. __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
  13059. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13060. return __ret;
  13061. }
  13062. __ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
  13063. uint16x4_t __ret;
  13064. __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
  13065. return __ret;
  13066. }
  13067. #endif
  13068. #ifdef __LITTLE_ENDIAN__
  13069. __ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
  13070. uint32x2_t __ret;
  13071. __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
  13072. return __ret;
  13073. }
  13074. #else
  13075. __ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
  13076. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13077. uint32x2_t __ret;
  13078. __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
  13079. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13080. return __ret;
  13081. }
  13082. __ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
  13083. uint32x2_t __ret;
  13084. __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
  13085. return __ret;
  13086. }
  13087. #endif
  13088. #ifdef __LITTLE_ENDIAN__
  13089. __ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
  13090. uint8x8_t __ret;
  13091. __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
  13092. return __ret;
  13093. }
  13094. #else
  13095. __ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
  13096. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13097. uint8x8_t __ret;
  13098. __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
  13099. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13100. return __ret;
  13101. }
  13102. __ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
  13103. uint8x8_t __ret;
  13104. __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
  13105. return __ret;
  13106. }
  13107. #endif
  13108. #ifdef __LITTLE_ENDIAN__
  13109. __ai int16x4_t vmovn_s32(int32x4_t __p0) {
  13110. int16x4_t __ret;
  13111. __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
  13112. return __ret;
  13113. }
  13114. #else
  13115. __ai int16x4_t vmovn_s32(int32x4_t __p0) {
  13116. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13117. int16x4_t __ret;
  13118. __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
  13119. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13120. return __ret;
  13121. }
  13122. __ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
  13123. int16x4_t __ret;
  13124. __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
  13125. return __ret;
  13126. }
  13127. #endif
  13128. #ifdef __LITTLE_ENDIAN__
  13129. __ai int32x2_t vmovn_s64(int64x2_t __p0) {
  13130. int32x2_t __ret;
  13131. __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
  13132. return __ret;
  13133. }
  13134. #else
  13135. __ai int32x2_t vmovn_s64(int64x2_t __p0) {
  13136. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13137. int32x2_t __ret;
  13138. __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
  13139. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13140. return __ret;
  13141. }
  13142. __ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
  13143. int32x2_t __ret;
  13144. __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
  13145. return __ret;
  13146. }
  13147. #endif
  13148. #ifdef __LITTLE_ENDIAN__
  13149. __ai int8x8_t vmovn_s16(int16x8_t __p0) {
  13150. int8x8_t __ret;
  13151. __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
  13152. return __ret;
  13153. }
  13154. #else
  13155. __ai int8x8_t vmovn_s16(int16x8_t __p0) {
  13156. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13157. int8x8_t __ret;
  13158. __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
  13159. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13160. return __ret;
  13161. }
  13162. __ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
  13163. int8x8_t __ret;
  13164. __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
  13165. return __ret;
  13166. }
  13167. #endif
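/* Illustrative sketch (editorial addition): vmovn_* narrows each lane to half
 * its width, keeping only the low half of every element (plain truncation; the
 * saturating variants are the vqmovn_* family):
 *
 *   uint16x8_t wide = vmovq_n_u16(0x1234);       // 8 lanes of 0x1234
 *   uint8x8_t  low  = vmovn_u16(wide);           // each lane: 0x34
 */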
  13168. #ifdef __LITTLE_ENDIAN__
  13169. __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  13170. uint8x16_t __ret;
  13171. __ret = __p0 * __p1;
  13172. return __ret;
  13173. }
  13174. #else
  13175. __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  13176. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13177. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13178. uint8x16_t __ret;
  13179. __ret = __rev0 * __rev1;
  13180. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13181. return __ret;
  13182. }
  13183. #endif
  13184. #ifdef __LITTLE_ENDIAN__
  13185. __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  13186. uint32x4_t __ret;
  13187. __ret = __p0 * __p1;
  13188. return __ret;
  13189. }
  13190. #else
  13191. __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  13192. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13193. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  13194. uint32x4_t __ret;
  13195. __ret = __rev0 * __rev1;
  13196. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13197. return __ret;
  13198. }
  13199. #endif
  13200. #ifdef __LITTLE_ENDIAN__
  13201. __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  13202. uint16x8_t __ret;
  13203. __ret = __p0 * __p1;
  13204. return __ret;
  13205. }
  13206. #else
  13207. __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  13208. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13209. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  13210. uint16x8_t __ret;
  13211. __ret = __rev0 * __rev1;
  13212. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13213. return __ret;
  13214. }
  13215. #endif
  13216. #ifdef __LITTLE_ENDIAN__
  13217. __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
  13218. int8x16_t __ret;
  13219. __ret = __p0 * __p1;
  13220. return __ret;
  13221. }
  13222. #else
  13223. __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
  13224. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13225. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13226. int8x16_t __ret;
  13227. __ret = __rev0 * __rev1;
  13228. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13229. return __ret;
  13230. }
  13231. #endif
  13232. #ifdef __LITTLE_ENDIAN__
  13233. __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
  13234. float32x4_t __ret;
  13235. __ret = __p0 * __p1;
  13236. return __ret;
  13237. }
  13238. #else
  13239. __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
  13240. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13241. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  13242. float32x4_t __ret;
  13243. __ret = __rev0 * __rev1;
  13244. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13245. return __ret;
  13246. }
  13247. #endif
  13248. #ifdef __LITTLE_ENDIAN__
  13249. __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
  13250. int32x4_t __ret;
  13251. __ret = __p0 * __p1;
  13252. return __ret;
  13253. }
  13254. #else
  13255. __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
  13256. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13257. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  13258. int32x4_t __ret;
  13259. __ret = __rev0 * __rev1;
  13260. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13261. return __ret;
  13262. }
  13263. #endif
  13264. #ifdef __LITTLE_ENDIAN__
  13265. __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
  13266. int16x8_t __ret;
  13267. __ret = __p0 * __p1;
  13268. return __ret;
  13269. }
  13270. #else
  13271. __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
  13272. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13273. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  13274. int16x8_t __ret;
  13275. __ret = __rev0 * __rev1;
  13276. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13277. return __ret;
  13278. }
  13279. #endif
  13280. #ifdef __LITTLE_ENDIAN__
  13281. __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
  13282. uint8x8_t __ret;
  13283. __ret = __p0 * __p1;
  13284. return __ret;
  13285. }
  13286. #else
  13287. __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
  13288. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13289. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  13290. uint8x8_t __ret;
  13291. __ret = __rev0 * __rev1;
  13292. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13293. return __ret;
  13294. }
  13295. #endif
  13296. #ifdef __LITTLE_ENDIAN__
  13297. __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
  13298. uint32x2_t __ret;
  13299. __ret = __p0 * __p1;
  13300. return __ret;
  13301. }
  13302. #else
  13303. __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
  13304. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13305. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  13306. uint32x2_t __ret;
  13307. __ret = __rev0 * __rev1;
  13308. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13309. return __ret;
  13310. }
  13311. #endif
  13312. #ifdef __LITTLE_ENDIAN__
  13313. __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
  13314. uint16x4_t __ret;
  13315. __ret = __p0 * __p1;
  13316. return __ret;
  13317. }
  13318. #else
  13319. __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
  13320. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13321. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  13322. uint16x4_t __ret;
  13323. __ret = __rev0 * __rev1;
  13324. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13325. return __ret;
  13326. }
  13327. #endif
  13328. #ifdef __LITTLE_ENDIAN__
  13329. __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
  13330. int8x8_t __ret;
  13331. __ret = __p0 * __p1;
  13332. return __ret;
  13333. }
  13334. #else
  13335. __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
  13336. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13337. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  13338. int8x8_t __ret;
  13339. __ret = __rev0 * __rev1;
  13340. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13341. return __ret;
  13342. }
  13343. #endif
  13344. #ifdef __LITTLE_ENDIAN__
  13345. __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
  13346. float32x2_t __ret;
  13347. __ret = __p0 * __p1;
  13348. return __ret;
  13349. }
  13350. #else
  13351. __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
  13352. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13353. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  13354. float32x2_t __ret;
  13355. __ret = __rev0 * __rev1;
  13356. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13357. return __ret;
  13358. }
  13359. #endif
  13360. #ifdef __LITTLE_ENDIAN__
  13361. __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
  13362. int32x2_t __ret;
  13363. __ret = __p0 * __p1;
  13364. return __ret;
  13365. }
  13366. #else
  13367. __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
  13368. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13369. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  13370. int32x2_t __ret;
  13371. __ret = __rev0 * __rev1;
  13372. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13373. return __ret;
  13374. }
  13375. #endif
  13376. #ifdef __LITTLE_ENDIAN__
  13377. __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
  13378. int16x4_t __ret;
  13379. __ret = __p0 * __p1;
  13380. return __ret;
  13381. }
  13382. #else
  13383. __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
  13384. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13385. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  13386. int16x4_t __ret;
  13387. __ret = __rev0 * __rev1;
  13388. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13389. return __ret;
  13390. }
  13391. #endif
  13392. #ifdef __LITTLE_ENDIAN__
  13393. __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
  13394. poly8x8_t __ret;
  13395. __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
  13396. return __ret;
  13397. }
  13398. #else
  13399. __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
  13400. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13401. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  13402. poly8x8_t __ret;
  13403. __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
  13404. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13405. return __ret;
  13406. }
  13407. #endif
  13408. #ifdef __LITTLE_ENDIAN__
  13409. __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  13410. poly8x16_t __ret;
  13411. __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
  13412. return __ret;
  13413. }
  13414. #else
  13415. __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  13416. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13417. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13418. poly8x16_t __ret;
  13419. __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
  13420. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  13421. return __ret;
  13422. }
  13423. #endif
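/* Illustrative sketch (editorial addition): the integer and float vmul(q)_*
 * variants above lower to the C vector '*' operator, while the polynomial
 * forms go through __builtin_neon_vmul(q)_v. The hypothetical helper below
 * is therefore the same as writing __a * __b on the vector types directly. */
__ai float32x4_t __example_mul_f32(float32x4_t __a, float32x4_t __b) {
  return vmulq_f32(__a, __b);  /* lane-wise product, identical to __a * __b */
}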
  13424. #ifdef __LITTLE_ENDIAN__
  13425. #define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  13426. uint32x4_t __s0 = __p0; \
  13427. uint32x2_t __s1 = __p1; \
  13428. uint32x4_t __ret; \
  13429. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  13430. __ret; \
  13431. })
  13432. #else
  13433. #define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  13434. uint32x4_t __s0 = __p0; \
  13435. uint32x2_t __s1 = __p1; \
  13436. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  13437. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  13438. uint32x4_t __ret; \
  13439. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  13440. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  13441. __ret; \
  13442. })
  13443. #endif
  13444. #ifdef __LITTLE_ENDIAN__
  13445. #define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  13446. uint16x8_t __s0 = __p0; \
  13447. uint16x4_t __s1 = __p1; \
  13448. uint16x8_t __ret; \
  13449. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  13450. __ret; \
  13451. })
  13452. #else
  13453. #define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  13454. uint16x8_t __s0 = __p0; \
  13455. uint16x4_t __s1 = __p1; \
  13456. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  13457. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  13458. uint16x8_t __ret; \
  13459. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  13460. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  13461. __ret; \
  13462. })
  13463. #endif
  13464. #ifdef __LITTLE_ENDIAN__
  13465. #define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  13466. float32x4_t __s0 = __p0; \
  13467. float32x2_t __s1 = __p1; \
  13468. float32x4_t __ret; \
  13469. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  13470. __ret; \
  13471. })
  13472. #else
  13473. #define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  13474. float32x4_t __s0 = __p0; \
  13475. float32x2_t __s1 = __p1; \
  13476. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  13477. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  13478. float32x4_t __ret; \
  13479. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  13480. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  13481. __ret; \
  13482. })
  13483. #endif
  13484. #ifdef __LITTLE_ENDIAN__
  13485. #define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  13486. int32x4_t __s0 = __p0; \
  13487. int32x2_t __s1 = __p1; \
  13488. int32x4_t __ret; \
  13489. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  13490. __ret; \
  13491. })
  13492. #else
  13493. #define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  13494. int32x4_t __s0 = __p0; \
  13495. int32x2_t __s1 = __p1; \
  13496. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  13497. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  13498. int32x4_t __ret; \
  13499. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  13500. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  13501. __ret; \
  13502. })
  13503. #endif
  13504. #ifdef __LITTLE_ENDIAN__
  13505. #define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  13506. int16x8_t __s0 = __p0; \
  13507. int16x4_t __s1 = __p1; \
  13508. int16x8_t __ret; \
  13509. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  13510. __ret; \
  13511. })
  13512. #else
  13513. #define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  13514. int16x8_t __s0 = __p0; \
  13515. int16x4_t __s1 = __p1; \
  13516. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  13517. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  13518. int16x8_t __ret; \
  13519. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  13520. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  13521. __ret; \
  13522. })
  13523. #endif
  13524. #ifdef __LITTLE_ENDIAN__
  13525. #define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  13526. uint32x2_t __s0 = __p0; \
  13527. uint32x2_t __s1 = __p1; \
  13528. uint32x2_t __ret; \
  13529. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  13530. __ret; \
  13531. })
  13532. #else
  13533. #define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  13534. uint32x2_t __s0 = __p0; \
  13535. uint32x2_t __s1 = __p1; \
  13536. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  13537. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  13538. uint32x2_t __ret; \
  13539. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  13540. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  13541. __ret; \
  13542. })
  13543. #endif
  13544. #ifdef __LITTLE_ENDIAN__
  13545. #define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  13546. uint16x4_t __s0 = __p0; \
  13547. uint16x4_t __s1 = __p1; \
  13548. uint16x4_t __ret; \
  13549. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  13550. __ret; \
  13551. })
  13552. #else
  13553. #define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  13554. uint16x4_t __s0 = __p0; \
  13555. uint16x4_t __s1 = __p1; \
  13556. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  13557. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  13558. uint16x4_t __ret; \
  13559. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  13560. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  13561. __ret; \
  13562. })
  13563. #endif
  13564. #ifdef __LITTLE_ENDIAN__
  13565. #define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  13566. float32x2_t __s0 = __p0; \
  13567. float32x2_t __s1 = __p1; \
  13568. float32x2_t __ret; \
  13569. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  13570. __ret; \
  13571. })
  13572. #else
  13573. #define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  13574. float32x2_t __s0 = __p0; \
  13575. float32x2_t __s1 = __p1; \
  13576. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  13577. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  13578. float32x2_t __ret; \
  13579. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  13580. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  13581. __ret; \
  13582. })
  13583. #endif
  13584. #ifdef __LITTLE_ENDIAN__
  13585. #define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  13586. int32x2_t __s0 = __p0; \
  13587. int32x2_t __s1 = __p1; \
  13588. int32x2_t __ret; \
  13589. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  13590. __ret; \
  13591. })
  13592. #else
  13593. #define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  13594. int32x2_t __s0 = __p0; \
  13595. int32x2_t __s1 = __p1; \
  13596. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  13597. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  13598. int32x2_t __ret; \
  13599. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  13600. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  13601. __ret; \
  13602. })
  13603. #endif
  13604. #ifdef __LITTLE_ENDIAN__
  13605. #define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  13606. int16x4_t __s0 = __p0; \
  13607. int16x4_t __s1 = __p1; \
  13608. int16x4_t __ret; \
  13609. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  13610. __ret; \
  13611. })
  13612. #else
  13613. #define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  13614. int16x4_t __s0 = __p0; \
  13615. int16x4_t __s1 = __p1; \
  13616. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  13617. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  13618. int16x4_t __ret; \
  13619. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  13620. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  13621. __ret; \
  13622. })
  13623. #endif
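/* Illustrative sketch (editorial addition): vmul(q)_lane_* broadcast lane
 * __p2 of the second operand before multiplying, as the shufflevector calls
 * above show. The hypothetical helper below scales every lane of __v by
 * lane 1 of a two-lane coefficient vector. */
__ai float32x4_t __example_mul_lane1_f32(float32x4_t __v, float32x2_t __c) {
  return vmulq_lane_f32(__v, __c, 1);  /* each lane of __v times __c[1] */
}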
  13624. #ifdef __LITTLE_ENDIAN__
  13625. __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
  13626. uint32x4_t __ret;
  13627. __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
  13628. return __ret;
  13629. }
  13630. #else
  13631. __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
  13632. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13633. uint32x4_t __ret;
  13634. __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
  13635. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13636. return __ret;
  13637. }
  13638. #endif
  13639. #ifdef __LITTLE_ENDIAN__
  13640. __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
  13641. uint16x8_t __ret;
  13642. __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
  13643. return __ret;
  13644. }
  13645. #else
  13646. __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
  13647. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13648. uint16x8_t __ret;
  13649. __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
  13650. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13651. return __ret;
  13652. }
  13653. #endif
  13654. #ifdef __LITTLE_ENDIAN__
  13655. __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
  13656. float32x4_t __ret;
  13657. __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
  13658. return __ret;
  13659. }
  13660. #else
  13661. __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
  13662. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13663. float32x4_t __ret;
  13664. __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
  13665. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13666. return __ret;
  13667. }
  13668. #endif
  13669. #ifdef __LITTLE_ENDIAN__
  13670. __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
  13671. int32x4_t __ret;
  13672. __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
  13673. return __ret;
  13674. }
  13675. #else
  13676. __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
  13677. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13678. int32x4_t __ret;
  13679. __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
  13680. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13681. return __ret;
  13682. }
  13683. #endif
  13684. #ifdef __LITTLE_ENDIAN__
  13685. __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
  13686. int16x8_t __ret;
  13687. __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
  13688. return __ret;
  13689. }
  13690. #else
  13691. __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
  13692. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13693. int16x8_t __ret;
  13694. __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
  13695. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13696. return __ret;
  13697. }
  13698. #endif
  13699. #ifdef __LITTLE_ENDIAN__
  13700. __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
  13701. uint32x2_t __ret;
  13702. __ret = __p0 * (uint32x2_t) {__p1, __p1};
  13703. return __ret;
  13704. }
  13705. #else
  13706. __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
  13707. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13708. uint32x2_t __ret;
  13709. __ret = __rev0 * (uint32x2_t) {__p1, __p1};
  13710. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13711. return __ret;
  13712. }
  13713. #endif
  13714. #ifdef __LITTLE_ENDIAN__
  13715. __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
  13716. uint16x4_t __ret;
  13717. __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
  13718. return __ret;
  13719. }
  13720. #else
  13721. __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
  13722. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13723. uint16x4_t __ret;
  13724. __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
  13725. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13726. return __ret;
  13727. }
  13728. #endif
  13729. #ifdef __LITTLE_ENDIAN__
  13730. __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
  13731. float32x2_t __ret;
  13732. __ret = __p0 * (float32x2_t) {__p1, __p1};
  13733. return __ret;
  13734. }
  13735. #else
  13736. __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
  13737. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13738. float32x2_t __ret;
  13739. __ret = __rev0 * (float32x2_t) {__p1, __p1};
  13740. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13741. return __ret;
  13742. }
  13743. #endif
  13744. #ifdef __LITTLE_ENDIAN__
  13745. __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
  13746. int32x2_t __ret;
  13747. __ret = __p0 * (int32x2_t) {__p1, __p1};
  13748. return __ret;
  13749. }
  13750. #else
  13751. __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
  13752. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13753. int32x2_t __ret;
  13754. __ret = __rev0 * (int32x2_t) {__p1, __p1};
  13755. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13756. return __ret;
  13757. }
  13758. #endif
  13759. #ifdef __LITTLE_ENDIAN__
  13760. __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
  13761. int16x4_t __ret;
  13762. __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
  13763. return __ret;
  13764. }
  13765. #else
  13766. __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
  13767. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13768. int16x4_t __ret;
  13769. __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
  13770. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13771. return __ret;
  13772. }
  13773. #endif
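/* Illustrative sketch (editorial addition): the vmul(q)_n_* forms above splat
 * the scalar into a vector literal and reuse the lane-wise multiply. */
__ai float32x4_t __example_scale_f32(float32x4_t __v, float32_t __s) {
  /* equivalent to __v * (float32x4_t) {__s, __s, __s, __s} */
  return vmulq_n_f32(__v, __s);
}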
  13774. #ifdef __LITTLE_ENDIAN__
  13775. __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
  13776. poly16x8_t __ret;
  13777. __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
  13778. return __ret;
  13779. }
  13780. #else
  13781. __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
  13782. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13783. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  13784. poly16x8_t __ret;
  13785. __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
  13786. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13787. return __ret;
  13788. }
  13789. __ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
  13790. poly16x8_t __ret;
  13791. __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
  13792. return __ret;
  13793. }
  13794. #endif
  13795. #ifdef __LITTLE_ENDIAN__
  13796. __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
  13797. uint16x8_t __ret;
  13798. __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
  13799. return __ret;
  13800. }
  13801. #else
  13802. __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
  13803. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13804. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  13805. uint16x8_t __ret;
  13806. __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
  13807. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13808. return __ret;
  13809. }
  13810. __ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
  13811. uint16x8_t __ret;
  13812. __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
  13813. return __ret;
  13814. }
  13815. #endif
  13816. #ifdef __LITTLE_ENDIAN__
  13817. __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
  13818. uint64x2_t __ret;
  13819. __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
  13820. return __ret;
  13821. }
  13822. #else
  13823. __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
  13824. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13825. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  13826. uint64x2_t __ret;
  13827. __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
  13828. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13829. return __ret;
  13830. }
  13831. __ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
  13832. uint64x2_t __ret;
  13833. __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
  13834. return __ret;
  13835. }
  13836. #endif
  13837. #ifdef __LITTLE_ENDIAN__
  13838. __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  13839. uint32x4_t __ret;
  13840. __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
  13841. return __ret;
  13842. }
  13843. #else
  13844. __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  13845. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13846. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  13847. uint32x4_t __ret;
  13848. __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
  13849. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13850. return __ret;
  13851. }
  13852. __ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
  13853. uint32x4_t __ret;
  13854. __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
  13855. return __ret;
  13856. }
  13857. #endif
  13858. #ifdef __LITTLE_ENDIAN__
  13859. __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  13860. int16x8_t __ret;
  13861. __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
  13862. return __ret;
  13863. }
  13864. #else
  13865. __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  13866. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  13867. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  13868. int16x8_t __ret;
  13869. __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
  13870. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  13871. return __ret;
  13872. }
  13873. __ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
  13874. int16x8_t __ret;
  13875. __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
  13876. return __ret;
  13877. }
  13878. #endif
  13879. #ifdef __LITTLE_ENDIAN__
  13880. __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  13881. int64x2_t __ret;
  13882. __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
  13883. return __ret;
  13884. }
  13885. #else
  13886. __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  13887. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  13888. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  13889. int64x2_t __ret;
  13890. __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
  13891. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  13892. return __ret;
  13893. }
  13894. __ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
  13895. int64x2_t __ret;
  13896. __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
  13897. return __ret;
  13898. }
  13899. #endif
  13900. #ifdef __LITTLE_ENDIAN__
  13901. __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  13902. int32x4_t __ret;
  13903. __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
  13904. return __ret;
  13905. }
  13906. #else
  13907. __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  13908. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  13909. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  13910. int32x4_t __ret;
  13911. __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
  13912. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  13913. return __ret;
  13914. }
  13915. __ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
  13916. int32x4_t __ret;
  13917. __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
  13918. return __ret;
  13919. }
  13920. #endif
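/* Illustrative sketch (editorial addition): the vmull_* intrinsics above widen
 * before multiplying, so the full product of each lane pair is kept
 * (e.g. int16x4_t x int16x4_t -> int32x4_t) instead of wrapping. */
__ai int32x4_t __example_widening_mul_s16(int16x4_t __a, int16x4_t __b) {
  return vmull_s16(__a, __b);  /* each 16x16-bit product fits in a 32-bit lane */
}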
  13921. #ifdef __LITTLE_ENDIAN__
  13922. #define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  13923. uint32x2_t __s0 = __p0; \
  13924. uint32x2_t __s1 = __p1; \
  13925. uint64x2_t __ret; \
  13926. __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  13927. __ret; \
  13928. })
  13929. #else
  13930. #define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  13931. uint32x2_t __s0 = __p0; \
  13932. uint32x2_t __s1 = __p1; \
  13933. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  13934. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  13935. uint64x2_t __ret; \
  13936. __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  13937. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  13938. __ret; \
  13939. })
  13940. #endif
  13941. #ifdef __LITTLE_ENDIAN__
  13942. #define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  13943. uint16x4_t __s0 = __p0; \
  13944. uint16x4_t __s1 = __p1; \
  13945. uint32x4_t __ret; \
  13946. __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  13947. __ret; \
  13948. })
  13949. #else
  13950. #define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  13951. uint16x4_t __s0 = __p0; \
  13952. uint16x4_t __s1 = __p1; \
  13953. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  13954. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  13955. uint32x4_t __ret; \
  13956. __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  13957. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  13958. __ret; \
  13959. })
  13960. #endif
  13961. #ifdef __LITTLE_ENDIAN__
  13962. #define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  13963. int32x2_t __s0 = __p0; \
  13964. int32x2_t __s1 = __p1; \
  13965. int64x2_t __ret; \
  13966. __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  13967. __ret; \
  13968. })
  13969. #else
  13970. #define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  13971. int32x2_t __s0 = __p0; \
  13972. int32x2_t __s1 = __p1; \
  13973. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  13974. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  13975. int64x2_t __ret; \
  13976. __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  13977. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  13978. __ret; \
  13979. })
  13980. #endif
  13981. #ifdef __LITTLE_ENDIAN__
  13982. #define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  13983. int16x4_t __s0 = __p0; \
  13984. int16x4_t __s1 = __p1; \
  13985. int32x4_t __ret; \
  13986. __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  13987. __ret; \
  13988. })
  13989. #else
  13990. #define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  13991. int16x4_t __s0 = __p0; \
  13992. int16x4_t __s1 = __p1; \
  13993. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  13994. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  13995. int32x4_t __ret; \
  13996. __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  13997. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  13998. __ret; \
  13999. })
  14000. #endif
  14001. #ifdef __LITTLE_ENDIAN__
  14002. __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  14003. uint64x2_t __ret;
  14004. __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
  14005. return __ret;
  14006. }
  14007. #else
  14008. __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  14009. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14010. uint64x2_t __ret;
  14011. __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
  14012. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14013. return __ret;
  14014. }
  14015. __ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
  14016. uint64x2_t __ret;
  14017. __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
  14018. return __ret;
  14019. }
  14020. #endif
  14021. #ifdef __LITTLE_ENDIAN__
  14022. __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  14023. uint32x4_t __ret;
  14024. __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
  14025. return __ret;
  14026. }
  14027. #else
  14028. __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  14029. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14030. uint32x4_t __ret;
  14031. __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
  14032. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14033. return __ret;
  14034. }
  14035. __ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
  14036. uint32x4_t __ret;
  14037. __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
  14038. return __ret;
  14039. }
  14040. #endif
  14041. #ifdef __LITTLE_ENDIAN__
  14042. __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  14043. int64x2_t __ret;
  14044. __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  14045. return __ret;
  14046. }
  14047. #else
  14048. __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  14049. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14050. int64x2_t __ret;
  14051. __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  14052. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14053. return __ret;
  14054. }
  14055. __ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
  14056. int64x2_t __ret;
  14057. __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  14058. return __ret;
  14059. }
  14060. #endif
  14061. #ifdef __LITTLE_ENDIAN__
  14062. __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  14063. int32x4_t __ret;
  14064. __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  14065. return __ret;
  14066. }
  14067. #else
  14068. __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  14069. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14070. int32x4_t __ret;
  14071. __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  14072. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14073. return __ret;
  14074. }
  14075. __ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
  14076. int32x4_t __ret;
  14077. __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  14078. return __ret;
  14079. }
  14080. #endif
  14081. #ifdef __LITTLE_ENDIAN__
  14082. __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
  14083. poly8x8_t __ret;
  14084. __ret = ~__p0;
  14085. return __ret;
  14086. }
  14087. #else
  14088. __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
  14089. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14090. poly8x8_t __ret;
  14091. __ret = ~__rev0;
  14092. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14093. return __ret;
  14094. }
  14095. #endif
  14096. #ifdef __LITTLE_ENDIAN__
  14097. __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
  14098. poly8x16_t __ret;
  14099. __ret = ~__p0;
  14100. return __ret;
  14101. }
  14102. #else
  14103. __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
  14104. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14105. poly8x16_t __ret;
  14106. __ret = ~__rev0;
  14107. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14108. return __ret;
  14109. }
  14110. #endif
  14111. #ifdef __LITTLE_ENDIAN__
  14112. __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
  14113. uint8x16_t __ret;
  14114. __ret = ~__p0;
  14115. return __ret;
  14116. }
  14117. #else
  14118. __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
  14119. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14120. uint8x16_t __ret;
  14121. __ret = ~__rev0;
  14122. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14123. return __ret;
  14124. }
  14125. #endif
  14126. #ifdef __LITTLE_ENDIAN__
  14127. __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
  14128. uint32x4_t __ret;
  14129. __ret = ~__p0;
  14130. return __ret;
  14131. }
  14132. #else
  14133. __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
  14134. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14135. uint32x4_t __ret;
  14136. __ret = ~__rev0;
  14137. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14138. return __ret;
  14139. }
  14140. #endif
  14141. #ifdef __LITTLE_ENDIAN__
  14142. __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
  14143. uint16x8_t __ret;
  14144. __ret = ~__p0;
  14145. return __ret;
  14146. }
  14147. #else
  14148. __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
  14149. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14150. uint16x8_t __ret;
  14151. __ret = ~__rev0;
  14152. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14153. return __ret;
  14154. }
  14155. #endif
  14156. #ifdef __LITTLE_ENDIAN__
  14157. __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
  14158. int8x16_t __ret;
  14159. __ret = ~__p0;
  14160. return __ret;
  14161. }
  14162. #else
  14163. __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
  14164. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14165. int8x16_t __ret;
  14166. __ret = ~__rev0;
  14167. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14168. return __ret;
  14169. }
  14170. #endif
  14171. #ifdef __LITTLE_ENDIAN__
  14172. __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
  14173. int32x4_t __ret;
  14174. __ret = ~__p0;
  14175. return __ret;
  14176. }
  14177. #else
  14178. __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
  14179. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14180. int32x4_t __ret;
  14181. __ret = ~__rev0;
  14182. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14183. return __ret;
  14184. }
  14185. #endif
  14186. #ifdef __LITTLE_ENDIAN__
  14187. __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
  14188. int16x8_t __ret;
  14189. __ret = ~__p0;
  14190. return __ret;
  14191. }
  14192. #else
  14193. __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
  14194. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14195. int16x8_t __ret;
  14196. __ret = ~__rev0;
  14197. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14198. return __ret;
  14199. }
  14200. #endif
  14201. #ifdef __LITTLE_ENDIAN__
  14202. __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
  14203. uint8x8_t __ret;
  14204. __ret = ~__p0;
  14205. return __ret;
  14206. }
  14207. #else
  14208. __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
  14209. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14210. uint8x8_t __ret;
  14211. __ret = ~__rev0;
  14212. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14213. return __ret;
  14214. }
  14215. #endif
  14216. #ifdef __LITTLE_ENDIAN__
  14217. __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
  14218. uint32x2_t __ret;
  14219. __ret = ~__p0;
  14220. return __ret;
  14221. }
  14222. #else
  14223. __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
  14224. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14225. uint32x2_t __ret;
  14226. __ret = ~__rev0;
  14227. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14228. return __ret;
  14229. }
  14230. #endif
  14231. #ifdef __LITTLE_ENDIAN__
  14232. __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
  14233. uint16x4_t __ret;
  14234. __ret = ~__p0;
  14235. return __ret;
  14236. }
  14237. #else
  14238. __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
  14239. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14240. uint16x4_t __ret;
  14241. __ret = ~__rev0;
  14242. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14243. return __ret;
  14244. }
  14245. #endif
  14246. #ifdef __LITTLE_ENDIAN__
  14247. __ai int8x8_t vmvn_s8(int8x8_t __p0) {
  14248. int8x8_t __ret;
  14249. __ret = ~__p0;
  14250. return __ret;
  14251. }
  14252. #else
  14253. __ai int8x8_t vmvn_s8(int8x8_t __p0) {
  14254. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14255. int8x8_t __ret;
  14256. __ret = ~__rev0;
  14257. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14258. return __ret;
  14259. }
  14260. #endif
  14261. #ifdef __LITTLE_ENDIAN__
  14262. __ai int32x2_t vmvn_s32(int32x2_t __p0) {
  14263. int32x2_t __ret;
  14264. __ret = ~__p0;
  14265. return __ret;
  14266. }
  14267. #else
  14268. __ai int32x2_t vmvn_s32(int32x2_t __p0) {
  14269. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14270. int32x2_t __ret;
  14271. __ret = ~__rev0;
  14272. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14273. return __ret;
  14274. }
  14275. #endif
  14276. #ifdef __LITTLE_ENDIAN__
  14277. __ai int16x4_t vmvn_s16(int16x4_t __p0) {
  14278. int16x4_t __ret;
  14279. __ret = ~__p0;
  14280. return __ret;
  14281. }
  14282. #else
  14283. __ai int16x4_t vmvn_s16(int16x4_t __p0) {
  14284. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14285. int16x4_t __ret;
  14286. __ret = ~__rev0;
  14287. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14288. return __ret;
  14289. }
  14290. #endif
  14291. #ifdef __LITTLE_ENDIAN__
  14292. __ai int8x16_t vnegq_s8(int8x16_t __p0) {
  14293. int8x16_t __ret;
  14294. __ret = -__p0;
  14295. return __ret;
  14296. }
  14297. #else
  14298. __ai int8x16_t vnegq_s8(int8x16_t __p0) {
  14299. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14300. int8x16_t __ret;
  14301. __ret = -__rev0;
  14302. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14303. return __ret;
  14304. }
  14305. #endif
  14306. #ifdef __LITTLE_ENDIAN__
  14307. __ai float32x4_t vnegq_f32(float32x4_t __p0) {
  14308. float32x4_t __ret;
  14309. __ret = -__p0;
  14310. return __ret;
  14311. }
  14312. #else
  14313. __ai float32x4_t vnegq_f32(float32x4_t __p0) {
  14314. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14315. float32x4_t __ret;
  14316. __ret = -__rev0;
  14317. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14318. return __ret;
  14319. }
  14320. #endif
  14321. #ifdef __LITTLE_ENDIAN__
  14322. __ai int32x4_t vnegq_s32(int32x4_t __p0) {
  14323. int32x4_t __ret;
  14324. __ret = -__p0;
  14325. return __ret;
  14326. }
  14327. #else
  14328. __ai int32x4_t vnegq_s32(int32x4_t __p0) {
  14329. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14330. int32x4_t __ret;
  14331. __ret = -__rev0;
  14332. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14333. return __ret;
  14334. }
  14335. #endif
  14336. #ifdef __LITTLE_ENDIAN__
  14337. __ai int16x8_t vnegq_s16(int16x8_t __p0) {
  14338. int16x8_t __ret;
  14339. __ret = -__p0;
  14340. return __ret;
  14341. }
  14342. #else
  14343. __ai int16x8_t vnegq_s16(int16x8_t __p0) {
  14344. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14345. int16x8_t __ret;
  14346. __ret = -__rev0;
  14347. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14348. return __ret;
  14349. }
  14350. #endif
  14351. #ifdef __LITTLE_ENDIAN__
  14352. __ai int8x8_t vneg_s8(int8x8_t __p0) {
  14353. int8x8_t __ret;
  14354. __ret = -__p0;
  14355. return __ret;
  14356. }
  14357. #else
  14358. __ai int8x8_t vneg_s8(int8x8_t __p0) {
  14359. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14360. int8x8_t __ret;
  14361. __ret = -__rev0;
  14362. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14363. return __ret;
  14364. }
  14365. #endif
  14366. #ifdef __LITTLE_ENDIAN__
  14367. __ai float32x2_t vneg_f32(float32x2_t __p0) {
  14368. float32x2_t __ret;
  14369. __ret = -__p0;
  14370. return __ret;
  14371. }
  14372. #else
  14373. __ai float32x2_t vneg_f32(float32x2_t __p0) {
  14374. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14375. float32x2_t __ret;
  14376. __ret = -__rev0;
  14377. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14378. return __ret;
  14379. }
  14380. #endif
  14381. #ifdef __LITTLE_ENDIAN__
  14382. __ai int32x2_t vneg_s32(int32x2_t __p0) {
  14383. int32x2_t __ret;
  14384. __ret = -__p0;
  14385. return __ret;
  14386. }
  14387. #else
  14388. __ai int32x2_t vneg_s32(int32x2_t __p0) {
  14389. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14390. int32x2_t __ret;
  14391. __ret = -__rev0;
  14392. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14393. return __ret;
  14394. }
  14395. #endif
  14396. #ifdef __LITTLE_ENDIAN__
  14397. __ai int16x4_t vneg_s16(int16x4_t __p0) {
  14398. int16x4_t __ret;
  14399. __ret = -__p0;
  14400. return __ret;
  14401. }
  14402. #else
  14403. __ai int16x4_t vneg_s16(int16x4_t __p0) {
  14404. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14405. int16x4_t __ret;
  14406. __ret = -__rev0;
  14407. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14408. return __ret;
  14409. }
  14410. #endif
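/* Illustrative sketch (editorial addition): vmvn_* and vneg_* lower directly
 * to the C vector operators '~' and '-' applied lane-wise, so they compose
 * like ordinary arithmetic. The helper name below is hypothetical. */
__ai int32x4_t __example_not_then_negate_s32(int32x4_t __x) {
  return vnegq_s32(vmvnq_s32(__x));  /* lane-wise -(~x), i.e. x + 1 */
}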
  14411. #ifdef __LITTLE_ENDIAN__
  14412. __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  14413. uint8x16_t __ret;
  14414. __ret = __p0 | ~__p1;
  14415. return __ret;
  14416. }
  14417. #else
  14418. __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  14419. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14420. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14421. uint8x16_t __ret;
  14422. __ret = __rev0 | ~__rev1;
  14423. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14424. return __ret;
  14425. }
  14426. #endif
  14427. #ifdef __LITTLE_ENDIAN__
  14428. __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  14429. uint32x4_t __ret;
  14430. __ret = __p0 | ~__p1;
  14431. return __ret;
  14432. }
  14433. #else
  14434. __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  14435. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14436. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14437. uint32x4_t __ret;
  14438. __ret = __rev0 | ~__rev1;
  14439. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14440. return __ret;
  14441. }
  14442. #endif
  14443. #ifdef __LITTLE_ENDIAN__
  14444. __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  14445. uint64x2_t __ret;
  14446. __ret = __p0 | ~__p1;
  14447. return __ret;
  14448. }
  14449. #else
  14450. __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  14451. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14452. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  14453. uint64x2_t __ret;
  14454. __ret = __rev0 | ~__rev1;
  14455. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14456. return __ret;
  14457. }
  14458. #endif
  14459. #ifdef __LITTLE_ENDIAN__
  14460. __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  14461. uint16x8_t __ret;
  14462. __ret = __p0 | ~__p1;
  14463. return __ret;
  14464. }
  14465. #else
  14466. __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  14467. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14468. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14469. uint16x8_t __ret;
  14470. __ret = __rev0 | ~__rev1;
  14471. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14472. return __ret;
  14473. }
  14474. #endif
  14475. #ifdef __LITTLE_ENDIAN__
  14476. __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
  14477. int8x16_t __ret;
  14478. __ret = __p0 | ~__p1;
  14479. return __ret;
  14480. }
  14481. #else
  14482. __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
  14483. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14484. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14485. int8x16_t __ret;
  14486. __ret = __rev0 | ~__rev1;
  14487. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14488. return __ret;
  14489. }
  14490. #endif
  14491. #ifdef __LITTLE_ENDIAN__
  14492. __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
  14493. int32x4_t __ret;
  14494. __ret = __p0 | ~__p1;
  14495. return __ret;
  14496. }
  14497. #else
  14498. __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
  14499. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14500. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14501. int32x4_t __ret;
  14502. __ret = __rev0 | ~__rev1;
  14503. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14504. return __ret;
  14505. }
  14506. #endif
  14507. #ifdef __LITTLE_ENDIAN__
  14508. __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
  14509. int64x2_t __ret;
  14510. __ret = __p0 | ~__p1;
  14511. return __ret;
  14512. }
  14513. #else
  14514. __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
  14515. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14516. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  14517. int64x2_t __ret;
  14518. __ret = __rev0 | ~__rev1;
  14519. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14520. return __ret;
  14521. }
  14522. #endif
  14523. #ifdef __LITTLE_ENDIAN__
  14524. __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
  14525. int16x8_t __ret;
  14526. __ret = __p0 | ~__p1;
  14527. return __ret;
  14528. }
  14529. #else
  14530. __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
  14531. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14532. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14533. int16x8_t __ret;
  14534. __ret = __rev0 | ~__rev1;
  14535. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14536. return __ret;
  14537. }
  14538. #endif
  14539. #ifdef __LITTLE_ENDIAN__
  14540. __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
  14541. uint8x8_t __ret;
  14542. __ret = __p0 | ~__p1;
  14543. return __ret;
  14544. }
  14545. #else
  14546. __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
  14547. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14548. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14549. uint8x8_t __ret;
  14550. __ret = __rev0 | ~__rev1;
  14551. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14552. return __ret;
  14553. }
  14554. #endif
  14555. #ifdef __LITTLE_ENDIAN__
  14556. __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
  14557. uint32x2_t __ret;
  14558. __ret = __p0 | ~__p1;
  14559. return __ret;
  14560. }
  14561. #else
  14562. __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
  14563. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14564. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  14565. uint32x2_t __ret;
  14566. __ret = __rev0 | ~__rev1;
  14567. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14568. return __ret;
  14569. }
  14570. #endif
  14571. #ifdef __LITTLE_ENDIAN__
  14572. __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
  14573. uint64x1_t __ret;
  14574. __ret = __p0 | ~__p1;
  14575. return __ret;
  14576. }
  14577. #else
  14578. __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
  14579. uint64x1_t __ret;
  14580. __ret = __p0 | ~__p1;
  14581. return __ret;
  14582. }
  14583. #endif
  14584. #ifdef __LITTLE_ENDIAN__
  14585. __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
  14586. uint16x4_t __ret;
  14587. __ret = __p0 | ~__p1;
  14588. return __ret;
  14589. }
  14590. #else
  14591. __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
  14592. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14593. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14594. uint16x4_t __ret;
  14595. __ret = __rev0 | ~__rev1;
  14596. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14597. return __ret;
  14598. }
  14599. #endif
  14600. #ifdef __LITTLE_ENDIAN__
  14601. __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
  14602. int8x8_t __ret;
  14603. __ret = __p0 | ~__p1;
  14604. return __ret;
  14605. }
  14606. #else
  14607. __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
  14608. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14609. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14610. int8x8_t __ret;
  14611. __ret = __rev0 | ~__rev1;
  14612. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14613. return __ret;
  14614. }
  14615. #endif
  14616. #ifdef __LITTLE_ENDIAN__
  14617. __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
  14618. int32x2_t __ret;
  14619. __ret = __p0 | ~__p1;
  14620. return __ret;
  14621. }
  14622. #else
  14623. __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
  14624. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14625. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  14626. int32x2_t __ret;
  14627. __ret = __rev0 | ~__rev1;
  14628. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14629. return __ret;
  14630. }
  14631. #endif
  14632. #ifdef __LITTLE_ENDIAN__
  14633. __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
  14634. int64x1_t __ret;
  14635. __ret = __p0 | ~__p1;
  14636. return __ret;
  14637. }
  14638. #else
  14639. __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
  14640. int64x1_t __ret;
  14641. __ret = __p0 | ~__p1;
  14642. return __ret;
  14643. }
  14644. #endif
  14645. #ifdef __LITTLE_ENDIAN__
  14646. __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
  14647. int16x4_t __ret;
  14648. __ret = __p0 | ~__p1;
  14649. return __ret;
  14650. }
  14651. #else
  14652. __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
  14653. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14654. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14655. int16x4_t __ret;
  14656. __ret = __rev0 | ~__rev1;
  14657. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14658. return __ret;
  14659. }
  14660. #endif
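/*
 * vorr and vorrq: bitwise inclusive OR of two vectors, lane by lane
 * (__ret = __p0 | __p1).  A minimal usage sketch, with hypothetical variable
 * names that are not part of this header:
 *
 *   uint8x16_t flags = vorrq_u8(a, b);   // flags[i] = a[i] | b[i]
 */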
  14661. #ifdef __LITTLE_ENDIAN__
  14662. __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  14663. uint8x16_t __ret;
  14664. __ret = __p0 | __p1;
  14665. return __ret;
  14666. }
  14667. #else
  14668. __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  14669. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14670. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14671. uint8x16_t __ret;
  14672. __ret = __rev0 | __rev1;
  14673. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14674. return __ret;
  14675. }
  14676. #endif
  14677. #ifdef __LITTLE_ENDIAN__
  14678. __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  14679. uint32x4_t __ret;
  14680. __ret = __p0 | __p1;
  14681. return __ret;
  14682. }
  14683. #else
  14684. __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  14685. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14686. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14687. uint32x4_t __ret;
  14688. __ret = __rev0 | __rev1;
  14689. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14690. return __ret;
  14691. }
  14692. #endif
  14693. #ifdef __LITTLE_ENDIAN__
  14694. __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  14695. uint64x2_t __ret;
  14696. __ret = __p0 | __p1;
  14697. return __ret;
  14698. }
  14699. #else
  14700. __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  14701. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14702. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  14703. uint64x2_t __ret;
  14704. __ret = __rev0 | __rev1;
  14705. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14706. return __ret;
  14707. }
  14708. #endif
  14709. #ifdef __LITTLE_ENDIAN__
  14710. __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  14711. uint16x8_t __ret;
  14712. __ret = __p0 | __p1;
  14713. return __ret;
  14714. }
  14715. #else
  14716. __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  14717. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14718. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14719. uint16x8_t __ret;
  14720. __ret = __rev0 | __rev1;
  14721. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14722. return __ret;
  14723. }
  14724. #endif
  14725. #ifdef __LITTLE_ENDIAN__
  14726. __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
  14727. int8x16_t __ret;
  14728. __ret = __p0 | __p1;
  14729. return __ret;
  14730. }
  14731. #else
  14732. __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
  14733. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14734. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14735. int8x16_t __ret;
  14736. __ret = __rev0 | __rev1;
  14737. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14738. return __ret;
  14739. }
  14740. #endif
  14741. #ifdef __LITTLE_ENDIAN__
  14742. __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
  14743. int32x4_t __ret;
  14744. __ret = __p0 | __p1;
  14745. return __ret;
  14746. }
  14747. #else
  14748. __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
  14749. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14750. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14751. int32x4_t __ret;
  14752. __ret = __rev0 | __rev1;
  14753. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14754. return __ret;
  14755. }
  14756. #endif
  14757. #ifdef __LITTLE_ENDIAN__
  14758. __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
  14759. int64x2_t __ret;
  14760. __ret = __p0 | __p1;
  14761. return __ret;
  14762. }
  14763. #else
  14764. __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
  14765. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14766. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  14767. int64x2_t __ret;
  14768. __ret = __rev0 | __rev1;
  14769. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14770. return __ret;
  14771. }
  14772. #endif
  14773. #ifdef __LITTLE_ENDIAN__
  14774. __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
  14775. int16x8_t __ret;
  14776. __ret = __p0 | __p1;
  14777. return __ret;
  14778. }
  14779. #else
  14780. __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
  14781. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14782. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14783. int16x8_t __ret;
  14784. __ret = __rev0 | __rev1;
  14785. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14786. return __ret;
  14787. }
  14788. #endif
  14789. #ifdef __LITTLE_ENDIAN__
  14790. __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
  14791. uint8x8_t __ret;
  14792. __ret = __p0 | __p1;
  14793. return __ret;
  14794. }
  14795. #else
  14796. __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
  14797. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14798. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14799. uint8x8_t __ret;
  14800. __ret = __rev0 | __rev1;
  14801. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14802. return __ret;
  14803. }
  14804. #endif
  14805. #ifdef __LITTLE_ENDIAN__
  14806. __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
  14807. uint32x2_t __ret;
  14808. __ret = __p0 | __p1;
  14809. return __ret;
  14810. }
  14811. #else
  14812. __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
  14813. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14814. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  14815. uint32x2_t __ret;
  14816. __ret = __rev0 | __rev1;
  14817. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14818. return __ret;
  14819. }
  14820. #endif
  14821. #ifdef __LITTLE_ENDIAN__
  14822. __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
  14823. uint64x1_t __ret;
  14824. __ret = __p0 | __p1;
  14825. return __ret;
  14826. }
  14827. #else
  14828. __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
  14829. uint64x1_t __ret;
  14830. __ret = __p0 | __p1;
  14831. return __ret;
  14832. }
  14833. #endif
  14834. #ifdef __LITTLE_ENDIAN__
  14835. __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
  14836. uint16x4_t __ret;
  14837. __ret = __p0 | __p1;
  14838. return __ret;
  14839. }
  14840. #else
  14841. __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
  14842. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14843. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14844. uint16x4_t __ret;
  14845. __ret = __rev0 | __rev1;
  14846. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14847. return __ret;
  14848. }
  14849. #endif
  14850. #ifdef __LITTLE_ENDIAN__
  14851. __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
  14852. int8x8_t __ret;
  14853. __ret = __p0 | __p1;
  14854. return __ret;
  14855. }
  14856. #else
  14857. __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
  14858. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14859. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14860. int8x8_t __ret;
  14861. __ret = __rev0 | __rev1;
  14862. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14863. return __ret;
  14864. }
  14865. #endif
  14866. #ifdef __LITTLE_ENDIAN__
  14867. __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
  14868. int32x2_t __ret;
  14869. __ret = __p0 | __p1;
  14870. return __ret;
  14871. }
  14872. #else
  14873. __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
  14874. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14875. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  14876. int32x2_t __ret;
  14877. __ret = __rev0 | __rev1;
  14878. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14879. return __ret;
  14880. }
  14881. #endif
  14882. #ifdef __LITTLE_ENDIAN__
  14883. __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
  14884. int64x1_t __ret;
  14885. __ret = __p0 | __p1;
  14886. return __ret;
  14887. }
  14888. #else
  14889. __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
  14890. int64x1_t __ret;
  14891. __ret = __p0 | __p1;
  14892. return __ret;
  14893. }
  14894. #endif
  14895. #ifdef __LITTLE_ENDIAN__
  14896. __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
  14897. int16x4_t __ret;
  14898. __ret = __p0 | __p1;
  14899. return __ret;
  14900. }
  14901. #else
  14902. __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
  14903. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14904. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14905. int16x4_t __ret;
  14906. __ret = __rev0 | __rev1;
  14907. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14908. return __ret;
  14909. }
  14910. #endif
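/*
 * vpadal and vpadalq: pairwise add and accumulate long.  Adjacent pairs of
 * elements in the second (narrower) argument are added together, widened to
 * the element width of the first argument, and accumulated into it; e.g.
 * vpadalq_u8(acc, v) with uint16x8_t acc and uint8x16_t v yields
 * acc[i] + v[2*i] + v[2*i+1] in each 16-bit lane i.
 */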
  14911. #ifdef __LITTLE_ENDIAN__
  14912. __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
  14913. uint16x8_t __ret;
  14914. __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  14915. return __ret;
  14916. }
  14917. #else
  14918. __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
  14919. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14920. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14921. uint16x8_t __ret;
  14922. __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  14923. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14924. return __ret;
  14925. }
  14926. #endif
  14927. #ifdef __LITTLE_ENDIAN__
  14928. __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
  14929. uint64x2_t __ret;
  14930. __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  14931. return __ret;
  14932. }
  14933. #else
  14934. __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
  14935. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14936. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14937. uint64x2_t __ret;
  14938. __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  14939. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14940. return __ret;
  14941. }
  14942. #endif
  14943. #ifdef __LITTLE_ENDIAN__
  14944. __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
  14945. uint32x4_t __ret;
  14946. __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  14947. return __ret;
  14948. }
  14949. #else
  14950. __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
  14951. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  14952. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  14953. uint32x4_t __ret;
  14954. __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  14955. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  14956. return __ret;
  14957. }
  14958. #endif
  14959. #ifdef __LITTLE_ENDIAN__
  14960. __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
  14961. int16x8_t __ret;
  14962. __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  14963. return __ret;
  14964. }
  14965. #else
  14966. __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
  14967. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  14968. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  14969. int16x8_t __ret;
  14970. __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  14971. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  14972. return __ret;
  14973. }
  14974. #endif
  14975. #ifdef __LITTLE_ENDIAN__
  14976. __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
  14977. int64x2_t __ret;
  14978. __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  14979. return __ret;
  14980. }
  14981. #else
  14982. __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
  14983. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  14984. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  14985. int64x2_t __ret;
  14986. __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  14987. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  14988. return __ret;
  14989. }
  14990. #endif
  14991. #ifdef __LITTLE_ENDIAN__
  14992. __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
  14993. int32x4_t __ret;
  14994. __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  14995. return __ret;
  14996. }
  14997. #else
  14998. __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
  14999. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15000. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15001. int32x4_t __ret;
  15002. __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  15003. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15004. return __ret;
  15005. }
  15006. #endif
  15007. #ifdef __LITTLE_ENDIAN__
  15008. __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
  15009. uint16x4_t __ret;
  15010. __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  15011. return __ret;
  15012. }
  15013. #else
  15014. __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
  15015. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15016. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15017. uint16x4_t __ret;
  15018. __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  15019. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15020. return __ret;
  15021. }
  15022. #endif
  15023. #ifdef __LITTLE_ENDIAN__
  15024. __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
  15025. uint64x1_t __ret;
  15026. __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  15027. return __ret;
  15028. }
  15029. #else
  15030. __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
  15031. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15032. uint64x1_t __ret;
  15033. __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
  15034. return __ret;
  15035. }
  15036. #endif
  15037. #ifdef __LITTLE_ENDIAN__
  15038. __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
  15039. uint32x2_t __ret;
  15040. __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  15041. return __ret;
  15042. }
  15043. #else
  15044. __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
  15045. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15046. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15047. uint32x2_t __ret;
  15048. __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  15049. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15050. return __ret;
  15051. }
  15052. #endif
  15053. #ifdef __LITTLE_ENDIAN__
  15054. __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
  15055. int16x4_t __ret;
  15056. __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  15057. return __ret;
  15058. }
  15059. #else
  15060. __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
  15061. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15062. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15063. int16x4_t __ret;
  15064. __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  15065. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15066. return __ret;
  15067. }
  15068. #endif
  15069. #ifdef __LITTLE_ENDIAN__
  15070. __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
  15071. int64x1_t __ret;
  15072. __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  15073. return __ret;
  15074. }
  15075. #else
  15076. __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
  15077. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15078. int64x1_t __ret;
  15079. __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
  15080. return __ret;
  15081. }
  15082. #endif
  15083. #ifdef __LITTLE_ENDIAN__
  15084. __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
  15085. int32x2_t __ret;
  15086. __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  15087. return __ret;
  15088. }
  15089. #else
  15090. __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
  15091. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15092. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15093. int32x2_t __ret;
  15094. __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  15095. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15096. return __ret;
  15097. }
  15098. #endif
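/*
 * vpadd: pairwise add on 64-bit vectors.  Each result element is the sum of
 * an adjacent pair, with pairs drawn from __p0 for the low half of the result
 * and from __p1 for the high half.
 */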
  15099. #ifdef __LITTLE_ENDIAN__
  15100. __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  15101. uint8x8_t __ret;
  15102. __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  15103. return __ret;
  15104. }
  15105. #else
  15106. __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  15107. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15108. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15109. uint8x8_t __ret;
  15110. __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  15111. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15112. return __ret;
  15113. }
  15114. #endif
  15115. #ifdef __LITTLE_ENDIAN__
  15116. __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  15117. uint32x2_t __ret;
  15118. __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  15119. return __ret;
  15120. }
  15121. #else
  15122. __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  15123. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15124. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15125. uint32x2_t __ret;
  15126. __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  15127. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15128. return __ret;
  15129. }
  15130. #endif
  15131. #ifdef __LITTLE_ENDIAN__
  15132. __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  15133. uint16x4_t __ret;
  15134. __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  15135. return __ret;
  15136. }
  15137. #else
  15138. __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  15139. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15140. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15141. uint16x4_t __ret;
  15142. __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  15143. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15144. return __ret;
  15145. }
  15146. #endif
  15147. #ifdef __LITTLE_ENDIAN__
  15148. __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
  15149. int8x8_t __ret;
  15150. __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  15151. return __ret;
  15152. }
  15153. #else
  15154. __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
  15155. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15156. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15157. int8x8_t __ret;
  15158. __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  15159. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15160. return __ret;
  15161. }
  15162. #endif
  15163. #ifdef __LITTLE_ENDIAN__
  15164. __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
  15165. float32x2_t __ret;
  15166. __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  15167. return __ret;
  15168. }
  15169. #else
  15170. __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
  15171. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15172. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15173. float32x2_t __ret;
  15174. __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  15175. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15176. return __ret;
  15177. }
  15178. #endif
  15179. #ifdef __LITTLE_ENDIAN__
  15180. __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
  15181. int32x2_t __ret;
  15182. __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  15183. return __ret;
  15184. }
  15185. #else
  15186. __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
  15187. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15188. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15189. int32x2_t __ret;
  15190. __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  15191. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15192. return __ret;
  15193. }
  15194. #endif
  15195. #ifdef __LITTLE_ENDIAN__
  15196. __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
  15197. int16x4_t __ret;
  15198. __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  15199. return __ret;
  15200. }
  15201. #else
  15202. __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
  15203. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15204. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15205. int16x4_t __ret;
  15206. __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  15207. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15208. return __ret;
  15209. }
  15210. #endif
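/*
 * vpaddl and vpaddlq: pairwise add long.  Adjacent pairs of elements of the
 * single argument are added, and the sums are returned in a vector whose
 * elements are twice as wide and half as numerous; e.g. vpaddlq_u8 turns a
 * uint8x16_t into a uint16x8_t of pair sums.
 */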
  15211. #ifdef __LITTLE_ENDIAN__
  15212. __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
  15213. uint16x8_t __ret;
  15214. __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
  15215. return __ret;
  15216. }
  15217. #else
  15218. __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
  15219. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15220. uint16x8_t __ret;
  15221. __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
  15222. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15223. return __ret;
  15224. }
  15225. #endif
  15226. #ifdef __LITTLE_ENDIAN__
  15227. __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
  15228. uint64x2_t __ret;
  15229. __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
  15230. return __ret;
  15231. }
  15232. #else
  15233. __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
  15234. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15235. uint64x2_t __ret;
  15236. __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
  15237. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15238. return __ret;
  15239. }
  15240. #endif
  15241. #ifdef __LITTLE_ENDIAN__
  15242. __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
  15243. uint32x4_t __ret;
  15244. __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
  15245. return __ret;
  15246. }
  15247. #else
  15248. __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
  15249. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15250. uint32x4_t __ret;
  15251. __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
  15252. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15253. return __ret;
  15254. }
  15255. #endif
  15256. #ifdef __LITTLE_ENDIAN__
  15257. __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
  15258. int16x8_t __ret;
  15259. __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
  15260. return __ret;
  15261. }
  15262. #else
  15263. __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
  15264. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15265. int16x8_t __ret;
  15266. __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
  15267. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15268. return __ret;
  15269. }
  15270. #endif
  15271. #ifdef __LITTLE_ENDIAN__
  15272. __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
  15273. int64x2_t __ret;
  15274. __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
  15275. return __ret;
  15276. }
  15277. #else
  15278. __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
  15279. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15280. int64x2_t __ret;
  15281. __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
  15282. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15283. return __ret;
  15284. }
  15285. #endif
  15286. #ifdef __LITTLE_ENDIAN__
  15287. __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
  15288. int32x4_t __ret;
  15289. __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
  15290. return __ret;
  15291. }
  15292. #else
  15293. __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
  15294. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15295. int32x4_t __ret;
  15296. __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
  15297. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15298. return __ret;
  15299. }
  15300. #endif
  15301. #ifdef __LITTLE_ENDIAN__
  15302. __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
  15303. uint16x4_t __ret;
  15304. __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
  15305. return __ret;
  15306. }
  15307. #else
  15308. __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
  15309. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15310. uint16x4_t __ret;
  15311. __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
  15312. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15313. return __ret;
  15314. }
  15315. #endif
  15316. #ifdef __LITTLE_ENDIAN__
  15317. __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
  15318. uint64x1_t __ret;
  15319. __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
  15320. return __ret;
  15321. }
  15322. #else
  15323. __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
  15324. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15325. uint64x1_t __ret;
  15326. __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
  15327. return __ret;
  15328. }
  15329. #endif
  15330. #ifdef __LITTLE_ENDIAN__
  15331. __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
  15332. uint32x2_t __ret;
  15333. __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
  15334. return __ret;
  15335. }
  15336. #else
  15337. __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
  15338. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15339. uint32x2_t __ret;
  15340. __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
  15341. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15342. return __ret;
  15343. }
  15344. #endif
  15345. #ifdef __LITTLE_ENDIAN__
  15346. __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
  15347. int16x4_t __ret;
  15348. __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
  15349. return __ret;
  15350. }
  15351. #else
  15352. __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
  15353. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15354. int16x4_t __ret;
  15355. __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
  15356. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15357. return __ret;
  15358. }
  15359. #endif
  15360. #ifdef __LITTLE_ENDIAN__
  15361. __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
  15362. int64x1_t __ret;
  15363. __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
  15364. return __ret;
  15365. }
  15366. #else
  15367. __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
  15368. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15369. int64x1_t __ret;
  15370. __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
  15371. return __ret;
  15372. }
  15373. #endif
  15374. #ifdef __LITTLE_ENDIAN__
  15375. __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
  15376. int32x2_t __ret;
  15377. __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
  15378. return __ret;
  15379. }
  15380. #else
  15381. __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
  15382. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15383. int32x2_t __ret;
  15384. __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
  15385. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15386. return __ret;
  15387. }
  15388. #endif
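/*
 * vpmax: pairwise maximum on 64-bit vectors.  Each result element is the
 * larger of an adjacent pair, with pairs drawn from __p0 for the low half of
 * the result and from __p1 for the high half.
 */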
  15389. #ifdef __LITTLE_ENDIAN__
  15390. __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
  15391. uint8x8_t __ret;
  15392. __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  15393. return __ret;
  15394. }
  15395. #else
  15396. __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
  15397. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15398. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15399. uint8x8_t __ret;
  15400. __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  15401. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15402. return __ret;
  15403. }
  15404. #endif
  15405. #ifdef __LITTLE_ENDIAN__
  15406. __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
  15407. uint32x2_t __ret;
  15408. __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  15409. return __ret;
  15410. }
  15411. #else
  15412. __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
  15413. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15414. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15415. uint32x2_t __ret;
  15416. __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  15417. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15418. return __ret;
  15419. }
  15420. #endif
  15421. #ifdef __LITTLE_ENDIAN__
  15422. __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
  15423. uint16x4_t __ret;
  15424. __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  15425. return __ret;
  15426. }
  15427. #else
  15428. __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
  15429. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15430. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15431. uint16x4_t __ret;
  15432. __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  15433. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15434. return __ret;
  15435. }
  15436. #endif
  15437. #ifdef __LITTLE_ENDIAN__
  15438. __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
  15439. int8x8_t __ret;
  15440. __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  15441. return __ret;
  15442. }
  15443. #else
  15444. __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
  15445. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15446. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15447. int8x8_t __ret;
  15448. __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  15449. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15450. return __ret;
  15451. }
  15452. #endif
  15453. #ifdef __LITTLE_ENDIAN__
  15454. __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
  15455. float32x2_t __ret;
  15456. __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  15457. return __ret;
  15458. }
  15459. #else
  15460. __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
  15461. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15462. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15463. float32x2_t __ret;
  15464. __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  15465. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15466. return __ret;
  15467. }
  15468. #endif
  15469. #ifdef __LITTLE_ENDIAN__
  15470. __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
  15471. int32x2_t __ret;
  15472. __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  15473. return __ret;
  15474. }
  15475. #else
  15476. __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
  15477. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15478. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15479. int32x2_t __ret;
  15480. __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  15481. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15482. return __ret;
  15483. }
  15484. #endif
  15485. #ifdef __LITTLE_ENDIAN__
  15486. __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
  15487. int16x4_t __ret;
  15488. __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  15489. return __ret;
  15490. }
  15491. #else
  15492. __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
  15493. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15494. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15495. int16x4_t __ret;
  15496. __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  15497. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15498. return __ret;
  15499. }
  15500. #endif
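/*
 * vpmin: pairwise minimum on 64-bit vectors, the counterpart of vpmax above;
 * each result element is the smaller of an adjacent pair.
 */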
  15501. #ifdef __LITTLE_ENDIAN__
  15502. __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
  15503. uint8x8_t __ret;
  15504. __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  15505. return __ret;
  15506. }
  15507. #else
  15508. __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
  15509. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15510. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15511. uint8x8_t __ret;
  15512. __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  15513. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15514. return __ret;
  15515. }
  15516. #endif
  15517. #ifdef __LITTLE_ENDIAN__
  15518. __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
  15519. uint32x2_t __ret;
  15520. __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  15521. return __ret;
  15522. }
  15523. #else
  15524. __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
  15525. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15526. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15527. uint32x2_t __ret;
  15528. __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  15529. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15530. return __ret;
  15531. }
  15532. #endif
  15533. #ifdef __LITTLE_ENDIAN__
  15534. __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
  15535. uint16x4_t __ret;
  15536. __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  15537. return __ret;
  15538. }
  15539. #else
  15540. __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
  15541. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15542. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15543. uint16x4_t __ret;
  15544. __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  15545. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15546. return __ret;
  15547. }
  15548. #endif
  15549. #ifdef __LITTLE_ENDIAN__
  15550. __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
  15551. int8x8_t __ret;
  15552. __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  15553. return __ret;
  15554. }
  15555. #else
  15556. __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
  15557. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15558. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15559. int8x8_t __ret;
  15560. __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  15561. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15562. return __ret;
  15563. }
  15564. #endif
  15565. #ifdef __LITTLE_ENDIAN__
  15566. __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
  15567. float32x2_t __ret;
  15568. __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  15569. return __ret;
  15570. }
  15571. #else
  15572. __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
  15573. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15574. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15575. float32x2_t __ret;
  15576. __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  15577. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15578. return __ret;
  15579. }
  15580. #endif
  15581. #ifdef __LITTLE_ENDIAN__
  15582. __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
  15583. int32x2_t __ret;
  15584. __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  15585. return __ret;
  15586. }
  15587. #else
  15588. __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
  15589. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15590. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15591. int32x2_t __ret;
  15592. __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  15593. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15594. return __ret;
  15595. }
  15596. #endif
  15597. #ifdef __LITTLE_ENDIAN__
  15598. __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
  15599. int16x4_t __ret;
  15600. __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  15601. return __ret;
  15602. }
  15603. #else
  15604. __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
  15605. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15606. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15607. int16x4_t __ret;
  15608. __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  15609. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15610. return __ret;
  15611. }
  15612. #endif
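/*
 * vqabs and vqabsq: saturating absolute value for signed vectors.  Unlike a
 * plain absolute value, the most negative representable element saturates to
 * the most positive one (e.g. INT8_MIN becomes INT8_MAX) instead of
 * overflowing.
 */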
  15613. #ifdef __LITTLE_ENDIAN__
  15614. __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
  15615. int8x16_t __ret;
  15616. __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
  15617. return __ret;
  15618. }
  15619. #else
  15620. __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
  15621. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15622. int8x16_t __ret;
  15623. __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
  15624. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15625. return __ret;
  15626. }
  15627. #endif
  15628. #ifdef __LITTLE_ENDIAN__
  15629. __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
  15630. int32x4_t __ret;
  15631. __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
  15632. return __ret;
  15633. }
  15634. #else
  15635. __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
  15636. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15637. int32x4_t __ret;
  15638. __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
  15639. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15640. return __ret;
  15641. }
  15642. #endif
  15643. #ifdef __LITTLE_ENDIAN__
  15644. __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
  15645. int16x8_t __ret;
  15646. __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
  15647. return __ret;
  15648. }
  15649. #else
  15650. __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
  15651. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15652. int16x8_t __ret;
  15653. __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
  15654. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15655. return __ret;
  15656. }
  15657. #endif
  15658. #ifdef __LITTLE_ENDIAN__
  15659. __ai int8x8_t vqabs_s8(int8x8_t __p0) {
  15660. int8x8_t __ret;
  15661. __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
  15662. return __ret;
  15663. }
  15664. #else
  15665. __ai int8x8_t vqabs_s8(int8x8_t __p0) {
  15666. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15667. int8x8_t __ret;
  15668. __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
  15669. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15670. return __ret;
  15671. }
  15672. #endif
  15673. #ifdef __LITTLE_ENDIAN__
  15674. __ai int32x2_t vqabs_s32(int32x2_t __p0) {
  15675. int32x2_t __ret;
  15676. __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
  15677. return __ret;
  15678. }
  15679. #else
  15680. __ai int32x2_t vqabs_s32(int32x2_t __p0) {
  15681. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15682. int32x2_t __ret;
  15683. __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
  15684. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15685. return __ret;
  15686. }
  15687. #endif
  15688. #ifdef __LITTLE_ENDIAN__
  15689. __ai int16x4_t vqabs_s16(int16x4_t __p0) {
  15690. int16x4_t __ret;
  15691. __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
  15692. return __ret;
  15693. }
  15694. #else
  15695. __ai int16x4_t vqabs_s16(int16x4_t __p0) {
  15696. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15697. int16x4_t __ret;
  15698. __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
  15699. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15700. return __ret;
  15701. }
  15702. #endif
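/*
 * vqadd and vqaddq: saturating addition.  Lane sums that would overflow are
 * clamped to the minimum or maximum representable value of the element type.
 * The additional __noswap_ definitions in the big-endian branches below are
 * internal helpers for other intrinsics whose operands have already been
 * lane-reversed, so they add without performing any reversal themselves.
 */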
  15703. #ifdef __LITTLE_ENDIAN__
  15704. __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  15705. uint8x16_t __ret;
  15706. __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  15707. return __ret;
  15708. }
  15709. #else
  15710. __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  15711. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15712. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15713. uint8x16_t __ret;
  15714. __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  15715. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15716. return __ret;
  15717. }
  15718. #endif
  15719. #ifdef __LITTLE_ENDIAN__
  15720. __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  15721. uint32x4_t __ret;
  15722. __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  15723. return __ret;
  15724. }
  15725. #else
  15726. __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  15727. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15728. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15729. uint32x4_t __ret;
  15730. __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  15731. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15732. return __ret;
  15733. }
  15734. #endif
  15735. #ifdef __LITTLE_ENDIAN__
  15736. __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  15737. uint64x2_t __ret;
  15738. __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  15739. return __ret;
  15740. }
  15741. #else
  15742. __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  15743. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15744. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15745. uint64x2_t __ret;
  15746. __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  15747. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15748. return __ret;
  15749. }
  15750. #endif
  15751. #ifdef __LITTLE_ENDIAN__
  15752. __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  15753. uint16x8_t __ret;
  15754. __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  15755. return __ret;
  15756. }
  15757. #else
  15758. __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  15759. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  15760. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  15761. uint16x8_t __ret;
  15762. __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  15763. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  15764. return __ret;
  15765. }
  15766. #endif
  15767. #ifdef __LITTLE_ENDIAN__
  15768. __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  15769. int8x16_t __ret;
  15770. __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  15771. return __ret;
  15772. }
  15773. #else
  15774. __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  15775. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15776. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15777. int8x16_t __ret;
  15778. __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  15779. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  15780. return __ret;
  15781. }
  15782. #endif
  15783. #ifdef __LITTLE_ENDIAN__
  15784. __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  15785. int32x4_t __ret;
  15786. __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  15787. return __ret;
  15788. }
  15789. #else
  15790. __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  15791. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  15792. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  15793. int32x4_t __ret;
  15794. __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  15795. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  15796. return __ret;
  15797. }
  15798. __ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  15799. int32x4_t __ret;
  15800. __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  15801. return __ret;
  15802. }
  15803. #endif
  15804. #ifdef __LITTLE_ENDIAN__
  15805. __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
  15806. int64x2_t __ret;
  15807. __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  15808. return __ret;
  15809. }
  15810. #else
  15811. __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
  15812. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  15813. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  15814. int64x2_t __ret;
  15815. __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  15816. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  15817. return __ret;
  15818. }
  15819. #endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  return __ret;
}
#else
__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  return __ret;
}
#else
__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  return __ret;
}
#else
__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
  uint64x1_t __ret;
  __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  return __ret;
}
#else
__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
  uint64x1_t __ret;
  __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  return __ret;
}
#else
__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  return __ret;
}
#else
__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  return __ret;
}
#else
__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
  int64x1_t __ret;
  __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  return __ret;
}
#else
__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
  int64x1_t __ret;
  __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  return __ret;
}
#else
__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  return __ret;
}
#endif
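/* vqdmlal_*: saturating doubling multiply-accumulate long (VQDMLAL). */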
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
  return __ret;
}
#else
__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
  return __ret;
}
#else
__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  int64x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __s2 = __p2; \
  int64x2_t __ret; \
  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  __ret; \
})
#else
#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  int64x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __s2 = __p2; \
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  int64x2_t __ret; \
  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __s2 = __p2; \
  int32x4_t __ret; \
  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  __ret; \
})
#else
#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __s2 = __p2; \
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  int32x4_t __ret; \
  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
  return __ret;
}
#else
__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
  return __ret;
}
#else
__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
  return __ret;
}
#endif
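/* vqdmlsl_*: saturating doubling multiply-subtract long (VQDMLSL). */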
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
  return __ret;
}
#else
__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
  return __ret;
}
#else
__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  int64x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __s2 = __p2; \
  int64x2_t __ret; \
  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  __ret; \
})
#else
#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  int64x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __s2 = __p2; \
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  int64x2_t __ret; \
  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __s2 = __p2; \
  int32x4_t __ret; \
  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  __ret; \
})
#else
#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __s2 = __p2; \
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  int32x4_t __ret; \
  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
  return __ret;
}
#else
__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
  return __ret;
}
#else
__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
  return __ret;
}
#endif
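/* vqdmulh_*: saturating doubling multiply returning high half (VQDMULH). */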
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  return __ret;
}
#else
__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  return __ret;
}
#else
__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  return __ret;
}
#else
__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  return __ret;
}
#else
__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x4_t __ret; \
  __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int32x4_t __ret; \
  __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x8_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x8_t __ret; \
  __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x8_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int16x8_t __ret; \
  __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __ret; \
  __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  __ret; \
})
#else
#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int32x2_t __ret; \
  __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __ret; \
  __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int16x4_t __ret; \
  __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
  return __ret;
}
#else
__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
  return __ret;
}
#else
__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
  return __ret;
}
#else
__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
  return __ret;
}
#else
__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
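/* vqdmull_*: saturating doubling multiply long (VQDMULL). */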
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
  return __ret;
}
#else
__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
  return __ret;
}
#else
__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int64x2_t __ret; \
  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  __ret; \
})
#else
#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int64x2_t __ret; \
  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int32x4_t __ret; \
  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int32x4_t __ret; \
  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  return __ret;
}
#else
__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  return __ret;
}
#else
__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
  return __ret;
}
#endif
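/* vqmovn_*: saturating narrow to half-width elements (VQMOVN). */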
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
  return __ret;
}
#else
__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
  return __ret;
}
#else
__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
  return __ret;
}
#else
__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
  return __ret;
}
#else
__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
  return __ret;
}
#else
__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
  return __ret;
}
#else
__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
  return __ret;
}
#endif
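/* vqmovun_*: saturating narrow from signed to unsigned (VQMOVUN). */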
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
  return __ret;
}
#else
__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
  uint16x4_t __ret;
  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
  return __ret;
}
#else
__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
  uint32x2_t __ret;
  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
  return __ret;
}
#else
__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
  uint8x8_t __ret;
  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
  return __ret;
}
#endif
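/* vqneg_*: saturating negate (VQNEG). */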
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
  return __ret;
}
#else
__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x16_t __ret;
  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
  return __ret;
}
#else
__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
  return __ret;
}
#else
__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x8_t vqneg_s8(int8x8_t __p0) {
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
  return __ret;
}
#else
__ai int8x8_t vqneg_s8(int8x8_t __p0) {
  int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x8_t __ret;
  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vqneg_s32(int32x2_t __p0) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
  return __ret;
}
#else
__ai int32x2_t vqneg_s32(int32x2_t __p0) {
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vqneg_s16(int16x4_t __p0) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
  return __ret;
}
#else
__ai int16x4_t vqneg_s16(int16x4_t __p0) {
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
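/* vqrdmulh_*: saturating rounding doubling multiply returning high half (VQRDMULH). */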
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  return __ret;
}
#else
__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  return __ret;
}
#else
__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  return __ret;
}
#else
__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  return __ret;
}
#else
__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x4_t __ret; \
  __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x4_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int32x4_t __ret; \
  __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x8_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x8_t __ret; \
  __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x8_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int16x8_t __ret; \
  __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __ret; \
  __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  __ret; \
})
#else
#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  int32x2_t __s0 = __p0; \
  int32x2_t __s1 = __p1; \
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  int32x2_t __ret; \
  __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __ret; \
  __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  __ret; \
})
#else
#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  int16x4_t __s0 = __p0; \
  int16x4_t __s1 = __p1; \
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  int16x4_t __ret; \
  __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  __ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
  return __ret;
}
#else
__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
  return __ret;
}
#else
__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
  int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __ret;
  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
  return __ret;
}
#else
__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int32x2_t __ret;
  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
  return __ret;
}
#else
__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
  int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int16x4_t __ret;
  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
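/* vqrshl_*: saturating rounding shift left by a signed per-lane shift count (VQRSHL). */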
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  uint8x16_t __ret;
  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  return __ret;
}
#else
__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  uint8x16_t __ret;
  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  return __ret;
}
#else
__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  uint32x4_t __ret;
  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  return __ret;
}
#else
__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  uint64x2_t __ret;
  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  uint16x8_t __ret;
  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  return __ret;
}
#else
__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  uint16x8_t __ret;
  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  int8x16_t __ret;
  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  return __ret;
}
#else
__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  int8x16_t __ret;
  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  return __ret;
}
#else
__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  int32x4_t __ret;
  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  int64x2_t __ret;
  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  return __ret;
}
#else
__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  17182. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  17183. int64x2_t __ret;
  17184. __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  17185. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  17186. return __ret;
  17187. }
  17188. #endif
  17189. #ifdef __LITTLE_ENDIAN__
  17190. __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  17191. int16x8_t __ret;
  17192. __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  17193. return __ret;
  17194. }
  17195. #else
  17196. __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  17197. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  17198. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  17199. int16x8_t __ret;
  17200. __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  17201. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  17202. return __ret;
  17203. }
  17204. #endif
  17205. #ifdef __LITTLE_ENDIAN__
  17206. __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  17207. uint8x8_t __ret;
  17208. __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  17209. return __ret;
  17210. }
  17211. #else
  17212. __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  17213. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  17214. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  17215. uint8x8_t __ret;
  17216. __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  17217. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  17218. return __ret;
  17219. }
  17220. #endif
  17221. #ifdef __LITTLE_ENDIAN__
  17222. __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  17223. uint32x2_t __ret;
  17224. __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  17225. return __ret;
  17226. }
  17227. #else
  17228. __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  17229. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  17230. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  17231. uint32x2_t __ret;
  17232. __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  17233. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  17234. return __ret;
  17235. }
  17236. #endif
  17237. #ifdef __LITTLE_ENDIAN__
  17238. __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  17239. uint64x1_t __ret;
  17240. __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  17241. return __ret;
  17242. }
  17243. #else
  17244. __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  17245. uint64x1_t __ret;
  17246. __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  17247. return __ret;
  17248. }
  17249. #endif
  17250. #ifdef __LITTLE_ENDIAN__
  17251. __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  17252. uint16x4_t __ret;
  17253. __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  17254. return __ret;
  17255. }
  17256. #else
  17257. __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  17258. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  17259. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  17260. uint16x4_t __ret;
  17261. __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  17262. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  17263. return __ret;
  17264. }
  17265. #endif
  17266. #ifdef __LITTLE_ENDIAN__
  17267. __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
  17268. int8x8_t __ret;
  17269. __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  17270. return __ret;
  17271. }
  17272. #else
  17273. __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
  17274. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  17275. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  17276. int8x8_t __ret;
  17277. __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  17278. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  17279. return __ret;
  17280. }
  17281. #endif
  17282. #ifdef __LITTLE_ENDIAN__
  17283. __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
  17284. int32x2_t __ret;
  17285. __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  17286. return __ret;
  17287. }
  17288. #else
  17289. __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
  17290. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  17291. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  17292. int32x2_t __ret;
  17293. __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  17294. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  17295. return __ret;
  17296. }
  17297. #endif
  17298. #ifdef __LITTLE_ENDIAN__
  17299. __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
  17300. int64x1_t __ret;
  17301. __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  17302. return __ret;
  17303. }
  17304. #else
  17305. __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
  17306. int64x1_t __ret;
  17307. __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  17308. return __ret;
  17309. }
  17310. #endif
  17311. #ifdef __LITTLE_ENDIAN__
  17312. __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
  17313. int16x4_t __ret;
  17314. __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  17315. return __ret;
  17316. }
  17317. #else
  17318. __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
  17319. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  17320. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  17321. int16x4_t __ret;
  17322. __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  17323. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  17324. return __ret;
  17325. }
  17326. #endif
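/* vqrshrn_n: saturating rounding shift right by an immediate, narrowing to
 * the half-width lane type. The __noswap_ forms omit the big-endian lane
 * reversal so other wrappers in this header can apply them to vectors whose
 * lanes are already reversed. Illustrative use (not part of the header):
 *   int16x4_t r = vqrshrn_n_s32(acc, 15);  // Q15 narrowing with rounding */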
  17327. #ifdef __LITTLE_ENDIAN__
  17328. #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
  17329. uint32x4_t __s0 = __p0; \
  17330. uint16x4_t __ret; \
  17331. __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
  17332. __ret; \
  17333. })
  17334. #else
  17335. #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
  17336. uint32x4_t __s0 = __p0; \
  17337. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  17338. uint16x4_t __ret; \
  17339. __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
  17340. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  17341. __ret; \
  17342. })
  17343. #define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
  17344. uint32x4_t __s0 = __p0; \
  17345. uint16x4_t __ret; \
  17346. __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
  17347. __ret; \
  17348. })
  17349. #endif
  17350. #ifdef __LITTLE_ENDIAN__
  17351. #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
  17352. uint64x2_t __s0 = __p0; \
  17353. uint32x2_t __ret; \
  17354. __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
  17355. __ret; \
  17356. })
  17357. #else
  17358. #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
  17359. uint64x2_t __s0 = __p0; \
  17360. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  17361. uint32x2_t __ret; \
  17362. __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
  17363. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  17364. __ret; \
  17365. })
  17366. #define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
  17367. uint64x2_t __s0 = __p0; \
  17368. uint32x2_t __ret; \
  17369. __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
  17370. __ret; \
  17371. })
  17372. #endif
  17373. #ifdef __LITTLE_ENDIAN__
  17374. #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
  17375. uint16x8_t __s0 = __p0; \
  17376. uint8x8_t __ret; \
  17377. __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
  17378. __ret; \
  17379. })
  17380. #else
  17381. #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
  17382. uint16x8_t __s0 = __p0; \
  17383. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  17384. uint8x8_t __ret; \
  17385. __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
  17386. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  17387. __ret; \
  17388. })
  17389. #define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
  17390. uint16x8_t __s0 = __p0; \
  17391. uint8x8_t __ret; \
  17392. __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
  17393. __ret; \
  17394. })
  17395. #endif
  17396. #ifdef __LITTLE_ENDIAN__
  17397. #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
  17398. int32x4_t __s0 = __p0; \
  17399. int16x4_t __ret; \
  17400. __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
  17401. __ret; \
  17402. })
  17403. #else
  17404. #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
  17405. int32x4_t __s0 = __p0; \
  17406. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  17407. int16x4_t __ret; \
  17408. __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
  17409. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  17410. __ret; \
  17411. })
  17412. #define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
  17413. int32x4_t __s0 = __p0; \
  17414. int16x4_t __ret; \
  17415. __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
  17416. __ret; \
  17417. })
  17418. #endif
  17419. #ifdef __LITTLE_ENDIAN__
  17420. #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
  17421. int64x2_t __s0 = __p0; \
  17422. int32x2_t __ret; \
  17423. __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
  17424. __ret; \
  17425. })
  17426. #else
  17427. #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
  17428. int64x2_t __s0 = __p0; \
  17429. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  17430. int32x2_t __ret; \
  17431. __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
  17432. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  17433. __ret; \
  17434. })
  17435. #define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
  17436. int64x2_t __s0 = __p0; \
  17437. int32x2_t __ret; \
  17438. __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
  17439. __ret; \
  17440. })
  17441. #endif
  17442. #ifdef __LITTLE_ENDIAN__
  17443. #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
  17444. int16x8_t __s0 = __p0; \
  17445. int8x8_t __ret; \
  17446. __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
  17447. __ret; \
  17448. })
  17449. #else
  17450. #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
  17451. int16x8_t __s0 = __p0; \
  17452. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  17453. int8x8_t __ret; \
  17454. __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
  17455. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  17456. __ret; \
  17457. })
  17458. #define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
  17459. int16x8_t __s0 = __p0; \
  17460. int8x8_t __ret; \
  17461. __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
  17462. __ret; \
  17463. })
  17464. #endif
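/* vqrshrun_n: as vqrshrn_n, but the signed input saturates to the unsigned
 * half-width type; negative values clamp to zero. */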
  17465. #ifdef __LITTLE_ENDIAN__
  17466. #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
  17467. int32x4_t __s0 = __p0; \
  17468. uint16x4_t __ret; \
  17469. __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
  17470. __ret; \
  17471. })
  17472. #else
  17473. #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
  17474. int32x4_t __s0 = __p0; \
  17475. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  17476. uint16x4_t __ret; \
  17477. __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
  17478. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  17479. __ret; \
  17480. })
  17481. #define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
  17482. int32x4_t __s0 = __p0; \
  17483. uint16x4_t __ret; \
  17484. __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
  17485. __ret; \
  17486. })
  17487. #endif
  17488. #ifdef __LITTLE_ENDIAN__
  17489. #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
  17490. int64x2_t __s0 = __p0; \
  17491. uint32x2_t __ret; \
  17492. __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
  17493. __ret; \
  17494. })
  17495. #else
  17496. #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
  17497. int64x2_t __s0 = __p0; \
  17498. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  17499. uint32x2_t __ret; \
  17500. __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
  17501. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  17502. __ret; \
  17503. })
  17504. #define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
  17505. int64x2_t __s0 = __p0; \
  17506. uint32x2_t __ret; \
  17507. __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
  17508. __ret; \
  17509. })
  17510. #endif
  17511. #ifdef __LITTLE_ENDIAN__
  17512. #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
  17513. int16x8_t __s0 = __p0; \
  17514. uint8x8_t __ret; \
  17515. __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
  17516. __ret; \
  17517. })
  17518. #else
  17519. #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
  17520. int16x8_t __s0 = __p0; \
  17521. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  17522. uint8x8_t __ret; \
  17523. __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
  17524. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  17525. __ret; \
  17526. })
  17527. #define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
  17528. int16x8_t __s0 = __p0; \
  17529. uint8x8_t __ret; \
  17530. __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
  17531. __ret; \
  17532. })
  17533. #endif
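/* vqshl/vqshlq: saturating shift left by a per-lane signed shift count; the
 * non-rounding counterpart of vqrshl above. */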
  17534. #ifdef __LITTLE_ENDIAN__
  17535. __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  17536. uint8x16_t __ret;
  17537. __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  17538. return __ret;
  17539. }
  17540. #else
  17541. __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  17542. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  17543. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  17544. uint8x16_t __ret;
  17545. __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  17546. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  17547. return __ret;
  17548. }
  17549. #endif
  17550. #ifdef __LITTLE_ENDIAN__
  17551. __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  17552. uint32x4_t __ret;
  17553. __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  17554. return __ret;
  17555. }
  17556. #else
  17557. __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  17558. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  17559. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  17560. uint32x4_t __ret;
  17561. __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  17562. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  17563. return __ret;
  17564. }
  17565. #endif
  17566. #ifdef __LITTLE_ENDIAN__
  17567. __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  17568. uint64x2_t __ret;
  17569. __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  17570. return __ret;
  17571. }
  17572. #else
  17573. __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  17574. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  17575. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  17576. uint64x2_t __ret;
  17577. __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  17578. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  17579. return __ret;
  17580. }
  17581. #endif
  17582. #ifdef __LITTLE_ENDIAN__
  17583. __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  17584. uint16x8_t __ret;
  17585. __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  17586. return __ret;
  17587. }
  17588. #else
  17589. __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  17590. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  17591. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  17592. uint16x8_t __ret;
  17593. __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  17594. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  17595. return __ret;
  17596. }
  17597. #endif
  17598. #ifdef __LITTLE_ENDIAN__
  17599. __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  17600. int8x16_t __ret;
  17601. __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  17602. return __ret;
  17603. }
  17604. #else
  17605. __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  17606. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  17607. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  17608. int8x16_t __ret;
  17609. __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  17610. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  17611. return __ret;
  17612. }
  17613. #endif
  17614. #ifdef __LITTLE_ENDIAN__
  17615. __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  17616. int32x4_t __ret;
  17617. __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  17618. return __ret;
  17619. }
  17620. #else
  17621. __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  17622. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  17623. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  17624. int32x4_t __ret;
  17625. __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  17626. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  17627. return __ret;
  17628. }
  17629. #endif
  17630. #ifdef __LITTLE_ENDIAN__
  17631. __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  17632. int64x2_t __ret;
  17633. __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  17634. return __ret;
  17635. }
  17636. #else
  17637. __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  17638. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  17639. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  17640. int64x2_t __ret;
  17641. __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  17642. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  17643. return __ret;
  17644. }
  17645. #endif
  17646. #ifdef __LITTLE_ENDIAN__
  17647. __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  17648. int16x8_t __ret;
  17649. __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  17650. return __ret;
  17651. }
  17652. #else
  17653. __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  17654. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  17655. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  17656. int16x8_t __ret;
  17657. __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  17658. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  17659. return __ret;
  17660. }
  17661. #endif
  17662. #ifdef __LITTLE_ENDIAN__
  17663. __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  17664. uint8x8_t __ret;
  17665. __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  17666. return __ret;
  17667. }
  17668. #else
  17669. __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  17670. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  17671. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  17672. uint8x8_t __ret;
  17673. __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  17674. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  17675. return __ret;
  17676. }
  17677. #endif
  17678. #ifdef __LITTLE_ENDIAN__
  17679. __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  17680. uint32x2_t __ret;
  17681. __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  17682. return __ret;
  17683. }
  17684. #else
  17685. __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  17686. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  17687. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  17688. uint32x2_t __ret;
  17689. __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  17690. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  17691. return __ret;
  17692. }
  17693. #endif
  17694. #ifdef __LITTLE_ENDIAN__
  17695. __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  17696. uint64x1_t __ret;
  17697. __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  17698. return __ret;
  17699. }
  17700. #else
  17701. __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  17702. uint64x1_t __ret;
  17703. __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  17704. return __ret;
  17705. }
  17706. #endif
  17707. #ifdef __LITTLE_ENDIAN__
  17708. __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  17709. uint16x4_t __ret;
  17710. __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  17711. return __ret;
  17712. }
  17713. #else
  17714. __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  17715. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  17716. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  17717. uint16x4_t __ret;
  17718. __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  17719. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  17720. return __ret;
  17721. }
  17722. #endif
  17723. #ifdef __LITTLE_ENDIAN__
  17724. __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
  17725. int8x8_t __ret;
  17726. __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  17727. return __ret;
  17728. }
  17729. #else
  17730. __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
  17731. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  17732. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  17733. int8x8_t __ret;
  17734. __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  17735. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  17736. return __ret;
  17737. }
  17738. #endif
  17739. #ifdef __LITTLE_ENDIAN__
  17740. __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
  17741. int32x2_t __ret;
  17742. __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  17743. return __ret;
  17744. }
  17745. #else
  17746. __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
  17747. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  17748. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  17749. int32x2_t __ret;
  17750. __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  17751. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  17752. return __ret;
  17753. }
  17754. #endif
  17755. #ifdef __LITTLE_ENDIAN__
  17756. __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
  17757. int64x1_t __ret;
  17758. __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  17759. return __ret;
  17760. }
  17761. #else
  17762. __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
  17763. int64x1_t __ret;
  17764. __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  17765. return __ret;
  17766. }
  17767. #endif
  17768. #ifdef __LITTLE_ENDIAN__
  17769. __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
  17770. int16x4_t __ret;
  17771. __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  17772. return __ret;
  17773. }
  17774. #else
  17775. __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
  17776. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  17777. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  17778. int16x4_t __ret;
  17779. __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  17780. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  17781. return __ret;
  17782. }
  17783. #endif
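/* vqshl_n/vqshlq_n: saturating shift left by an immediate; __p1 must be a
 * compile-time constant within the lane width. */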
  17784. #ifdef __LITTLE_ENDIAN__
  17785. #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
  17786. uint8x16_t __s0 = __p0; \
  17787. uint8x16_t __ret; \
  17788. __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
  17789. __ret; \
  17790. })
  17791. #else
  17792. #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
  17793. uint8x16_t __s0 = __p0; \
  17794. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  17795. uint8x16_t __ret; \
  17796. __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
  17797. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  17798. __ret; \
  17799. })
  17800. #endif
  17801. #ifdef __LITTLE_ENDIAN__
  17802. #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
  17803. uint32x4_t __s0 = __p0; \
  17804. uint32x4_t __ret; \
  17805. __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
  17806. __ret; \
  17807. })
  17808. #else
  17809. #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
  17810. uint32x4_t __s0 = __p0; \
  17811. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  17812. uint32x4_t __ret; \
  17813. __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
  17814. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  17815. __ret; \
  17816. })
  17817. #endif
  17818. #ifdef __LITTLE_ENDIAN__
  17819. #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
  17820. uint64x2_t __s0 = __p0; \
  17821. uint64x2_t __ret; \
  17822. __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
  17823. __ret; \
  17824. })
  17825. #else
  17826. #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
  17827. uint64x2_t __s0 = __p0; \
  17828. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  17829. uint64x2_t __ret; \
  17830. __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
  17831. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  17832. __ret; \
  17833. })
  17834. #endif
  17835. #ifdef __LITTLE_ENDIAN__
  17836. #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
  17837. uint16x8_t __s0 = __p0; \
  17838. uint16x8_t __ret; \
  17839. __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
  17840. __ret; \
  17841. })
  17842. #else
  17843. #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
  17844. uint16x8_t __s0 = __p0; \
  17845. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  17846. uint16x8_t __ret; \
  17847. __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
  17848. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  17849. __ret; \
  17850. })
  17851. #endif
  17852. #ifdef __LITTLE_ENDIAN__
  17853. #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
  17854. int8x16_t __s0 = __p0; \
  17855. int8x16_t __ret; \
  17856. __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
  17857. __ret; \
  17858. })
  17859. #else
  17860. #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
  17861. int8x16_t __s0 = __p0; \
  17862. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  17863. int8x16_t __ret; \
  17864. __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
  17865. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  17866. __ret; \
  17867. })
  17868. #endif
  17869. #ifdef __LITTLE_ENDIAN__
  17870. #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
  17871. int32x4_t __s0 = __p0; \
  17872. int32x4_t __ret; \
  17873. __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
  17874. __ret; \
  17875. })
  17876. #else
  17877. #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
  17878. int32x4_t __s0 = __p0; \
  17879. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  17880. int32x4_t __ret; \
  17881. __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
  17882. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  17883. __ret; \
  17884. })
  17885. #endif
  17886. #ifdef __LITTLE_ENDIAN__
  17887. #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
  17888. int64x2_t __s0 = __p0; \
  17889. int64x2_t __ret; \
  17890. __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
  17891. __ret; \
  17892. })
  17893. #else
  17894. #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
  17895. int64x2_t __s0 = __p0; \
  17896. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  17897. int64x2_t __ret; \
  17898. __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
  17899. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  17900. __ret; \
  17901. })
  17902. #endif
  17903. #ifdef __LITTLE_ENDIAN__
  17904. #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
  17905. int16x8_t __s0 = __p0; \
  17906. int16x8_t __ret; \
  17907. __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
  17908. __ret; \
  17909. })
  17910. #else
  17911. #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
  17912. int16x8_t __s0 = __p0; \
  17913. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  17914. int16x8_t __ret; \
  17915. __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
  17916. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  17917. __ret; \
  17918. })
  17919. #endif
  17920. #ifdef __LITTLE_ENDIAN__
  17921. #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
  17922. uint8x8_t __s0 = __p0; \
  17923. uint8x8_t __ret; \
  17924. __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
  17925. __ret; \
  17926. })
  17927. #else
  17928. #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
  17929. uint8x8_t __s0 = __p0; \
  17930. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  17931. uint8x8_t __ret; \
  17932. __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
  17933. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  17934. __ret; \
  17935. })
  17936. #endif
  17937. #ifdef __LITTLE_ENDIAN__
  17938. #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
  17939. uint32x2_t __s0 = __p0; \
  17940. uint32x2_t __ret; \
  17941. __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
  17942. __ret; \
  17943. })
  17944. #else
  17945. #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
  17946. uint32x2_t __s0 = __p0; \
  17947. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  17948. uint32x2_t __ret; \
  17949. __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
  17950. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  17951. __ret; \
  17952. })
  17953. #endif
  17954. #ifdef __LITTLE_ENDIAN__
  17955. #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
  17956. uint64x1_t __s0 = __p0; \
  17957. uint64x1_t __ret; \
  17958. __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
  17959. __ret; \
  17960. })
  17961. #else
  17962. #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
  17963. uint64x1_t __s0 = __p0; \
  17964. uint64x1_t __ret; \
  17965. __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
  17966. __ret; \
  17967. })
  17968. #endif
  17969. #ifdef __LITTLE_ENDIAN__
  17970. #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
  17971. uint16x4_t __s0 = __p0; \
  17972. uint16x4_t __ret; \
  17973. __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
  17974. __ret; \
  17975. })
  17976. #else
  17977. #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
  17978. uint16x4_t __s0 = __p0; \
  17979. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  17980. uint16x4_t __ret; \
  17981. __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
  17982. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  17983. __ret; \
  17984. })
  17985. #endif
  17986. #ifdef __LITTLE_ENDIAN__
  17987. #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
  17988. int8x8_t __s0 = __p0; \
  17989. int8x8_t __ret; \
  17990. __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
  17991. __ret; \
  17992. })
  17993. #else
  17994. #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
  17995. int8x8_t __s0 = __p0; \
  17996. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  17997. int8x8_t __ret; \
  17998. __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
  17999. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  18000. __ret; \
  18001. })
  18002. #endif
  18003. #ifdef __LITTLE_ENDIAN__
  18004. #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
  18005. int32x2_t __s0 = __p0; \
  18006. int32x2_t __ret; \
  18007. __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
  18008. __ret; \
  18009. })
  18010. #else
  18011. #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
  18012. int32x2_t __s0 = __p0; \
  18013. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  18014. int32x2_t __ret; \
  18015. __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
  18016. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  18017. __ret; \
  18018. })
  18019. #endif
  18020. #ifdef __LITTLE_ENDIAN__
  18021. #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
  18022. int64x1_t __s0 = __p0; \
  18023. int64x1_t __ret; \
  18024. __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
  18025. __ret; \
  18026. })
  18027. #else
  18028. #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
  18029. int64x1_t __s0 = __p0; \
  18030. int64x1_t __ret; \
  18031. __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
  18032. __ret; \
  18033. })
  18034. #endif
  18035. #ifdef __LITTLE_ENDIAN__
  18036. #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
  18037. int16x4_t __s0 = __p0; \
  18038. int16x4_t __ret; \
  18039. __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
  18040. __ret; \
  18041. })
  18042. #else
  18043. #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
  18044. int16x4_t __s0 = __p0; \
  18045. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  18046. int16x4_t __ret; \
  18047. __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
  18048. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  18049. __ret; \
  18050. })
  18051. #endif
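/* vqshlu_n/vqshluq_n: signed input shifted left by an immediate and
 * saturated to the unsigned lane type; negative inputs clamp to zero. */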
  18052. #ifdef __LITTLE_ENDIAN__
  18053. #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
  18054. int8x16_t __s0 = __p0; \
  18055. uint8x16_t __ret; \
  18056. __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
  18057. __ret; \
  18058. })
  18059. #else
  18060. #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
  18061. int8x16_t __s0 = __p0; \
  18062. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  18063. uint8x16_t __ret; \
  18064. __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
  18065. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  18066. __ret; \
  18067. })
  18068. #endif
  18069. #ifdef __LITTLE_ENDIAN__
  18070. #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
  18071. int32x4_t __s0 = __p0; \
  18072. uint32x4_t __ret; \
  18073. __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
  18074. __ret; \
  18075. })
  18076. #else
  18077. #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
  18078. int32x4_t __s0 = __p0; \
  18079. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  18080. uint32x4_t __ret; \
  18081. __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
  18082. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  18083. __ret; \
  18084. })
  18085. #endif
  18086. #ifdef __LITTLE_ENDIAN__
  18087. #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
  18088. int64x2_t __s0 = __p0; \
  18089. uint64x2_t __ret; \
  18090. __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
  18091. __ret; \
  18092. })
  18093. #else
  18094. #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
  18095. int64x2_t __s0 = __p0; \
  18096. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  18097. uint64x2_t __ret; \
  18098. __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
  18099. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  18100. __ret; \
  18101. })
  18102. #endif
  18103. #ifdef __LITTLE_ENDIAN__
  18104. #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
  18105. int16x8_t __s0 = __p0; \
  18106. uint16x8_t __ret; \
  18107. __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
  18108. __ret; \
  18109. })
  18110. #else
  18111. #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
  18112. int16x8_t __s0 = __p0; \
  18113. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  18114. uint16x8_t __ret; \
  18115. __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
  18116. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  18117. __ret; \
  18118. })
  18119. #endif
  18120. #ifdef __LITTLE_ENDIAN__
  18121. #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
  18122. int8x8_t __s0 = __p0; \
  18123. uint8x8_t __ret; \
  18124. __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
  18125. __ret; \
  18126. })
  18127. #else
  18128. #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
  18129. int8x8_t __s0 = __p0; \
  18130. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  18131. uint8x8_t __ret; \
  18132. __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
  18133. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  18134. __ret; \
  18135. })
  18136. #endif
  18137. #ifdef __LITTLE_ENDIAN__
  18138. #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
  18139. int32x2_t __s0 = __p0; \
  18140. uint32x2_t __ret; \
  18141. __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
  18142. __ret; \
  18143. })
  18144. #else
  18145. #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
  18146. int32x2_t __s0 = __p0; \
  18147. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  18148. uint32x2_t __ret; \
  18149. __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
  18150. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  18151. __ret; \
  18152. })
  18153. #endif
  18154. #ifdef __LITTLE_ENDIAN__
  18155. #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
  18156. int64x1_t __s0 = __p0; \
  18157. uint64x1_t __ret; \
  18158. __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
  18159. __ret; \
  18160. })
  18161. #else
  18162. #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
  18163. int64x1_t __s0 = __p0; \
  18164. uint64x1_t __ret; \
  18165. __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
  18166. __ret; \
  18167. })
  18168. #endif
  18169. #ifdef __LITTLE_ENDIAN__
  18170. #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
  18171. int16x4_t __s0 = __p0; \
  18172. uint16x4_t __ret; \
  18173. __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
  18174. __ret; \
  18175. })
  18176. #else
  18177. #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
  18178. int16x4_t __s0 = __p0; \
  18179. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  18180. uint16x4_t __ret; \
  18181. __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
  18182. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  18183. __ret; \
  18184. })
  18185. #endif
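/* vqshrn_n: saturating shift right by an immediate, narrowing to the
 * half-width lane type; truncating (the rounding form is vqrshrn_n above). */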
  18186. #ifdef __LITTLE_ENDIAN__
  18187. #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
  18188. uint32x4_t __s0 = __p0; \
  18189. uint16x4_t __ret; \
  18190. __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
  18191. __ret; \
  18192. })
  18193. #else
  18194. #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
  18195. uint32x4_t __s0 = __p0; \
  18196. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  18197. uint16x4_t __ret; \
  18198. __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
  18199. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  18200. __ret; \
  18201. })
  18202. #define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
  18203. uint32x4_t __s0 = __p0; \
  18204. uint16x4_t __ret; \
  18205. __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
  18206. __ret; \
  18207. })
  18208. #endif
  18209. #ifdef __LITTLE_ENDIAN__
  18210. #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
  18211. uint64x2_t __s0 = __p0; \
  18212. uint32x2_t __ret; \
  18213. __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
  18214. __ret; \
  18215. })
  18216. #else
  18217. #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
  18218. uint64x2_t __s0 = __p0; \
  18219. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  18220. uint32x2_t __ret; \
  18221. __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
  18222. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  18223. __ret; \
  18224. })
  18225. #define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
  18226. uint64x2_t __s0 = __p0; \
  18227. uint32x2_t __ret; \
  18228. __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
  18229. __ret; \
  18230. })
  18231. #endif
  18232. #ifdef __LITTLE_ENDIAN__
  18233. #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
  18234. uint16x8_t __s0 = __p0; \
  18235. uint8x8_t __ret; \
  18236. __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
  18237. __ret; \
  18238. })
  18239. #else
  18240. #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
  18241. uint16x8_t __s0 = __p0; \
  18242. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  18243. uint8x8_t __ret; \
  18244. __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
  18245. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  18246. __ret; \
  18247. })
  18248. #define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
  18249. uint16x8_t __s0 = __p0; \
  18250. uint8x8_t __ret; \
  18251. __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
  18252. __ret; \
  18253. })
  18254. #endif
  18255. #ifdef __LITTLE_ENDIAN__
  18256. #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
  18257. int32x4_t __s0 = __p0; \
  18258. int16x4_t __ret; \
  18259. __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
  18260. __ret; \
  18261. })
  18262. #else
  18263. #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
  18264. int32x4_t __s0 = __p0; \
  18265. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  18266. int16x4_t __ret; \
  18267. __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
  18268. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  18269. __ret; \
  18270. })
  18271. #define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
  18272. int32x4_t __s0 = __p0; \
  18273. int16x4_t __ret; \
  18274. __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
  18275. __ret; \
  18276. })
  18277. #endif
  18278. #ifdef __LITTLE_ENDIAN__
  18279. #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
  18280. int64x2_t __s0 = __p0; \
  18281. int32x2_t __ret; \
  18282. __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
  18283. __ret; \
  18284. })
  18285. #else
  18286. #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
  18287. int64x2_t __s0 = __p0; \
  18288. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  18289. int32x2_t __ret; \
  18290. __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
  18291. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  18292. __ret; \
  18293. })
  18294. #define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
  18295. int64x2_t __s0 = __p0; \
  18296. int32x2_t __ret; \
  18297. __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
  18298. __ret; \
  18299. })
  18300. #endif
  18301. #ifdef __LITTLE_ENDIAN__
  18302. #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
  18303. int16x8_t __s0 = __p0; \
  18304. int8x8_t __ret; \
  18305. __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
  18306. __ret; \
  18307. })
  18308. #else
  18309. #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
  18310. int16x8_t __s0 = __p0; \
  18311. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  18312. int8x8_t __ret; \
  18313. __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
  18314. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  18315. __ret; \
  18316. })
  18317. #define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
  18318. int16x8_t __s0 = __p0; \
  18319. int8x8_t __ret; \
  18320. __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
  18321. __ret; \
  18322. })
  18323. #endif
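/* vqshrun_n: truncating shift right by an immediate from signed input,
 * narrowing and saturating to the unsigned half-width type. */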
  18324. #ifdef __LITTLE_ENDIAN__
  18325. #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
  18326. int32x4_t __s0 = __p0; \
  18327. uint16x4_t __ret; \
  18328. __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
  18329. __ret; \
  18330. })
  18331. #else
  18332. #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
  18333. int32x4_t __s0 = __p0; \
  18334. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  18335. uint16x4_t __ret; \
  18336. __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
  18337. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  18338. __ret; \
  18339. })
  18340. #define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
  18341. int32x4_t __s0 = __p0; \
  18342. uint16x4_t __ret; \
  18343. __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
  18344. __ret; \
  18345. })
  18346. #endif
  18347. #ifdef __LITTLE_ENDIAN__
  18348. #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
  18349. int64x2_t __s0 = __p0; \
  18350. uint32x2_t __ret; \
  18351. __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
  18352. __ret; \
  18353. })
  18354. #else
  18355. #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
  18356. int64x2_t __s0 = __p0; \
  18357. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  18358. uint32x2_t __ret; \
  18359. __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
  18360. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  18361. __ret; \
  18362. })
  18363. #define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
  18364. int64x2_t __s0 = __p0; \
  18365. uint32x2_t __ret; \
  18366. __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
  18367. __ret; \
  18368. })
  18369. #endif
  18370. #ifdef __LITTLE_ENDIAN__
  18371. #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
  18372. int16x8_t __s0 = __p0; \
  18373. uint8x8_t __ret; \
  18374. __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
  18375. __ret; \
  18376. })
  18377. #else
  18378. #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
  18379. int16x8_t __s0 = __p0; \
  18380. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  18381. uint8x8_t __ret; \
  18382. __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
  18383. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  18384. __ret; \
  18385. })
  18386. #define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
  18387. int16x8_t __s0 = __p0; \
  18388. uint8x8_t __ret; \
  18389. __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
  18390. __ret; \
  18391. })
  18392. #endif
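/* vqsub/vqsubq: saturating subtraction; results clamp at the lane type's
 * limits instead of wrapping, e.g. (illustrative) each lane of
 * vqsubq_u8(a, b) is 0 wherever b exceeds a. */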
  18393. #ifdef __LITTLE_ENDIAN__
  18394. __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  18395. uint8x16_t __ret;
  18396. __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  18397. return __ret;
  18398. }
  18399. #else
  18400. __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  18401. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18402. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18403. uint8x16_t __ret;
  18404. __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  18405. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18406. return __ret;
  18407. }
  18408. #endif
  18409. #ifdef __LITTLE_ENDIAN__
  18410. __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  18411. uint32x4_t __ret;
  18412. __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  18413. return __ret;
  18414. }
  18415. #else
  18416. __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  18417. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18418. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  18419. uint32x4_t __ret;
  18420. __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  18421. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18422. return __ret;
  18423. }
  18424. #endif
  18425. #ifdef __LITTLE_ENDIAN__
  18426. __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  18427. uint64x2_t __ret;
  18428. __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  18429. return __ret;
  18430. }
  18431. #else
  18432. __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  18433. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18434. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  18435. uint64x2_t __ret;
  18436. __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  18437. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18438. return __ret;
  18439. }
  18440. #endif
  18441. #ifdef __LITTLE_ENDIAN__
  18442. __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  18443. uint16x8_t __ret;
  18444. __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  18445. return __ret;
  18446. }
  18447. #else
  18448. __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  18449. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18450. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  18451. uint16x8_t __ret;
  18452. __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  18453. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18454. return __ret;
  18455. }
  18456. #endif
  18457. #ifdef __LITTLE_ENDIAN__
  18458. __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
  18459. int8x16_t __ret;
  18460. __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  18461. return __ret;
  18462. }
  18463. #else
  18464. __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
  18465. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18466. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18467. int8x16_t __ret;
  18468. __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  18469. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18470. return __ret;
  18471. }
  18472. #endif
  18473. #ifdef __LITTLE_ENDIAN__
  18474. __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  18475. int32x4_t __ret;
  18476. __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  18477. return __ret;
  18478. }
  18479. #else
  18480. __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  18481. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18482. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  18483. int32x4_t __ret;
  18484. __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  18485. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18486. return __ret;
  18487. }
  18488. __ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  18489. int32x4_t __ret;
  18490. __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  18491. return __ret;
  18492. }
  18493. #endif
  18494. #ifdef __LITTLE_ENDIAN__
  18495. __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
  18496. int64x2_t __ret;
  18497. __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  18498. return __ret;
  18499. }
  18500. #else
  18501. __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
  18502. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18503. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  18504. int64x2_t __ret;
  18505. __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  18506. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18507. return __ret;
  18508. }
  18509. #endif
  18510. #ifdef __LITTLE_ENDIAN__
  18511. __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  18512. int16x8_t __ret;
  18513. __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  18514. return __ret;
  18515. }
  18516. #else
  18517. __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  18518. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18519. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  18520. int16x8_t __ret;
  18521. __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  18522. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18523. return __ret;
  18524. }
  18525. __ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  18526. int16x8_t __ret;
  18527. __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  18528. return __ret;
  18529. }
  18530. #endif
  18531. #ifdef __LITTLE_ENDIAN__
  18532. __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
  18533. uint8x8_t __ret;
  18534. __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  18535. return __ret;
  18536. }
  18537. #else
  18538. __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
  18539. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18540. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  18541. uint8x8_t __ret;
  18542. __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  18543. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18544. return __ret;
  18545. }
  18546. #endif
  18547. #ifdef __LITTLE_ENDIAN__
  18548. __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
  18549. uint32x2_t __ret;
  18550. __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  18551. return __ret;
  18552. }
  18553. #else
  18554. __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
  18555. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18556. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  18557. uint32x2_t __ret;
  18558. __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  18559. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18560. return __ret;
  18561. }
  18562. #endif
  18563. #ifdef __LITTLE_ENDIAN__
  18564. __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
  18565. uint64x1_t __ret;
  18566. __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  18567. return __ret;
  18568. }
  18569. #else
  18570. __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
  18571. uint64x1_t __ret;
  18572. __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  18573. return __ret;
  18574. }
  18575. #endif
  18576. #ifdef __LITTLE_ENDIAN__
  18577. __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
  18578. uint16x4_t __ret;
  18579. __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  18580. return __ret;
  18581. }
  18582. #else
  18583. __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
  18584. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18585. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  18586. uint16x4_t __ret;
  18587. __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  18588. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18589. return __ret;
  18590. }
  18591. #endif
  18592. #ifdef __LITTLE_ENDIAN__
  18593. __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
  18594. int8x8_t __ret;
  18595. __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  18596. return __ret;
  18597. }
  18598. #else
  18599. __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
  18600. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18601. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  18602. int8x8_t __ret;
  18603. __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  18604. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18605. return __ret;
  18606. }
  18607. #endif
  18608. #ifdef __LITTLE_ENDIAN__
  18609. __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
  18610. int32x2_t __ret;
  18611. __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  18612. return __ret;
  18613. }
  18614. #else
  18615. __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
  18616. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18617. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  18618. int32x2_t __ret;
  18619. __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  18620. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18621. return __ret;
  18622. }
  18623. __ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
  18624. int32x2_t __ret;
  18625. __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  18626. return __ret;
  18627. }
  18628. #endif
  18629. #ifdef __LITTLE_ENDIAN__
  18630. __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
  18631. int64x1_t __ret;
  18632. __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  18633. return __ret;
  18634. }
  18635. #else
  18636. __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
  18637. int64x1_t __ret;
  18638. __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  18639. return __ret;
  18640. }
  18641. #endif
  18642. #ifdef __LITTLE_ENDIAN__
  18643. __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
  18644. int16x4_t __ret;
  18645. __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  18646. return __ret;
  18647. }
  18648. #else
  18649. __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
  18650. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18651. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  18652. int16x4_t __ret;
  18653. __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  18654. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18655. return __ret;
  18656. }
  18657. __ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
  18658. int16x4_t __ret;
  18659. __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  18660. return __ret;
  18661. }
  18662. #endif
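/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): the vqsub/vqsubq family subtracts lane-wise with saturation, so
 * unsigned results clamp at 0 and signed results clamp at the type's
 * minimum/maximum instead of wrapping.  The helper name is hypothetical.
 */
__ai uint8x16_t __example_vqsub_clamped_diff(uint8x16_t __a, uint8x16_t __b) {
  /* Per-lane max(__a - __b, 0) without modular wrap-around. */
  return vqsubq_u8(__a, __b);
}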
  18663. #ifdef __LITTLE_ENDIAN__
  18664. __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  18665. uint16x4_t __ret;
  18666. __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
  18667. return __ret;
  18668. }
  18669. #else
  18670. __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  18671. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18672. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  18673. uint16x4_t __ret;
  18674. __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
  18675. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18676. return __ret;
  18677. }
  18678. __ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  18679. uint16x4_t __ret;
  18680. __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
  18681. return __ret;
  18682. }
  18683. #endif
  18684. #ifdef __LITTLE_ENDIAN__
  18685. __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  18686. uint32x2_t __ret;
  18687. __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
  18688. return __ret;
  18689. }
  18690. #else
  18691. __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  18692. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18693. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  18694. uint32x2_t __ret;
  18695. __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
  18696. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18697. return __ret;
  18698. }
  18699. __ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  18700. uint32x2_t __ret;
  18701. __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
  18702. return __ret;
  18703. }
  18704. #endif
  18705. #ifdef __LITTLE_ENDIAN__
  18706. __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  18707. uint8x8_t __ret;
  18708. __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
  18709. return __ret;
  18710. }
  18711. #else
  18712. __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  18713. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18714. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  18715. uint8x8_t __ret;
  18716. __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
  18717. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18718. return __ret;
  18719. }
  18720. __ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  18721. uint8x8_t __ret;
  18722. __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
  18723. return __ret;
  18724. }
  18725. #endif
  18726. #ifdef __LITTLE_ENDIAN__
  18727. __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
  18728. int16x4_t __ret;
  18729. __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
  18730. return __ret;
  18731. }
  18732. #else
  18733. __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
  18734. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18735. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  18736. int16x4_t __ret;
  18737. __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
  18738. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18739. return __ret;
  18740. }
  18741. __ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
  18742. int16x4_t __ret;
  18743. __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
  18744. return __ret;
  18745. }
  18746. #endif
  18747. #ifdef __LITTLE_ENDIAN__
  18748. __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
  18749. int32x2_t __ret;
  18750. __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
  18751. return __ret;
  18752. }
  18753. #else
  18754. __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
  18755. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18756. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  18757. int32x2_t __ret;
  18758. __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
  18759. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18760. return __ret;
  18761. }
  18762. __ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
  18763. int32x2_t __ret;
  18764. __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
  18765. return __ret;
  18766. }
  18767. #endif
  18768. #ifdef __LITTLE_ENDIAN__
  18769. __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
  18770. int8x8_t __ret;
  18771. __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
  18772. return __ret;
  18773. }
  18774. #else
  18775. __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
  18776. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18777. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  18778. int8x8_t __ret;
  18779. __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
  18780. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18781. return __ret;
  18782. }
  18783. __ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
  18784. int8x8_t __ret;
  18785. __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
  18786. return __ret;
  18787. }
  18788. #endif
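/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): vraddhn_* adds two wide vectors, rounds, and keeps only the high
 * half of each lane; for s16 lanes the result is (a + b + 0x80) >> 8,
 * narrowed to s8.  The helper name is hypothetical.
 */
__ai int8x8_t __example_vraddhn_high_half_sum(int16x8_t __a, int16x8_t __b) {
  /* Rounded high halves of the 16-bit sums, narrowed to 8 bits. */
  return vraddhn_s16(__a, __b);
}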
  18789. #ifdef __LITTLE_ENDIAN__
  18790. __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
  18791. uint32x4_t __ret;
  18792. __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
  18793. return __ret;
  18794. }
  18795. #else
  18796. __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
  18797. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18798. uint32x4_t __ret;
  18799. __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
  18800. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18801. return __ret;
  18802. }
  18803. #endif
  18804. #ifdef __LITTLE_ENDIAN__
  18805. __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
  18806. float32x4_t __ret;
  18807. __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
  18808. return __ret;
  18809. }
  18810. #else
  18811. __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
  18812. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18813. float32x4_t __ret;
  18814. __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
  18815. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18816. return __ret;
  18817. }
  18818. #endif
  18819. #ifdef __LITTLE_ENDIAN__
  18820. __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
  18821. uint32x2_t __ret;
  18822. __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
  18823. return __ret;
  18824. }
  18825. #else
  18826. __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
  18827. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18828. uint32x2_t __ret;
  18829. __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
  18830. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18831. return __ret;
  18832. }
  18833. #endif
  18834. #ifdef __LITTLE_ENDIAN__
  18835. __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
  18836. float32x2_t __ret;
  18837. __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
  18838. return __ret;
  18839. }
  18840. #else
  18841. __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
  18842. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18843. float32x2_t __ret;
  18844. __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
  18845. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18846. return __ret;
  18847. }
  18848. #endif
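/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): vrecpe_f32 produces a low-precision per-lane estimate of 1/x,
 * normally refined with vrecps_f32, which is defined just below.  The
 * helper name is hypothetical.
 */
__ai float32x2_t __example_vrecpe_rough_reciprocal(float32x2_t __x) {
  /* Hardware estimate of 1.0f / __x for each lane. */
  return vrecpe_f32(__x);
}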
  18849. #ifdef __LITTLE_ENDIAN__
  18850. __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
  18851. float32x4_t __ret;
  18852. __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  18853. return __ret;
  18854. }
  18855. #else
  18856. __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
  18857. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18858. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  18859. float32x4_t __ret;
  18860. __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  18861. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18862. return __ret;
  18863. }
  18864. #endif
  18865. #ifdef __LITTLE_ENDIAN__
  18866. __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
  18867. float32x2_t __ret;
  18868. __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  18869. return __ret;
  18870. }
  18871. #else
  18872. __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
  18873. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  18874. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  18875. float32x2_t __ret;
  18876. __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  18877. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  18878. return __ret;
  18879. }
  18880. #endif
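/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): vrecps_f32 computes the Newton-Raphson correction term
 * (2 - x * est), so multiplying it into a vrecpe_f32 estimate sharpens the
 * reciprocal by roughly one refinement step per iteration.  The helper name
 * is hypothetical, and the sketch assumes vmul_f32, which this header
 * defines earlier.
 */
__ai float32x2_t __example_vrecps_refined_reciprocal(float32x2_t __x) {
  float32x2_t __est = vrecpe_f32(__x);             /* coarse 1/x estimate  */
  __est = vmul_f32(vrecps_f32(__x, __est), __est); /* one refinement step  */
  return __est;
}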
  18881. #ifdef __LITTLE_ENDIAN__
  18882. __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
  18883. poly8x8_t __ret;
  18884. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  18885. return __ret;
  18886. }
  18887. #else
  18888. __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
  18889. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18890. poly8x8_t __ret;
  18891. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  18892. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18893. return __ret;
  18894. }
  18895. #endif
  18896. #ifdef __LITTLE_ENDIAN__
  18897. __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
  18898. poly8x16_t __ret;
  18899. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  18900. return __ret;
  18901. }
  18902. #else
  18903. __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
  18904. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18905. poly8x16_t __ret;
  18906. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  18907. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18908. return __ret;
  18909. }
  18910. #endif
  18911. #ifdef __LITTLE_ENDIAN__
  18912. __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
  18913. uint8x16_t __ret;
  18914. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  18915. return __ret;
  18916. }
  18917. #else
  18918. __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
  18919. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18920. uint8x16_t __ret;
  18921. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  18922. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18923. return __ret;
  18924. }
  18925. #endif
  18926. #ifdef __LITTLE_ENDIAN__
  18927. __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
  18928. int8x16_t __ret;
  18929. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  18930. return __ret;
  18931. }
  18932. #else
  18933. __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
  18934. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18935. int8x16_t __ret;
  18936. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
  18937. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  18938. return __ret;
  18939. }
  18940. #endif
  18941. #ifdef __LITTLE_ENDIAN__
  18942. __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
  18943. uint8x8_t __ret;
  18944. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  18945. return __ret;
  18946. }
  18947. #else
  18948. __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
  18949. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18950. uint8x8_t __ret;
  18951. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  18952. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18953. return __ret;
  18954. }
  18955. #endif
  18956. #ifdef __LITTLE_ENDIAN__
  18957. __ai int8x8_t vrev16_s8(int8x8_t __p0) {
  18958. int8x8_t __ret;
  18959. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  18960. return __ret;
  18961. }
  18962. #else
  18963. __ai int8x8_t vrev16_s8(int8x8_t __p0) {
  18964. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18965. int8x8_t __ret;
  18966. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  18967. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18968. return __ret;
  18969. }
  18970. #endif
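/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): vrev16_* reverses the bytes inside every 16-bit halfword, which
 * is the usual way to byte-swap a vector of 16-bit values that were loaded
 * with the wrong endianness.  The helper name is hypothetical.
 */
__ai uint8x8_t __example_vrev16_byteswap16(uint8x8_t __bytes) {
  /* {b0,b1,b2,b3,...} -> {b1,b0,b3,b2,...} */
  return vrev16_u8(__bytes);
}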
  18971. #ifdef __LITTLE_ENDIAN__
  18972. __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
  18973. poly8x8_t __ret;
  18974. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  18975. return __ret;
  18976. }
  18977. #else
  18978. __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
  18979. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  18980. poly8x8_t __ret;
  18981. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  18982. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  18983. return __ret;
  18984. }
  18985. #endif
  18986. #ifdef __LITTLE_ENDIAN__
  18987. __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
  18988. poly16x4_t __ret;
  18989. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  18990. return __ret;
  18991. }
  18992. #else
  18993. __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
  18994. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  18995. poly16x4_t __ret;
  18996. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  18997. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  18998. return __ret;
  18999. }
  19000. #endif
  19001. #ifdef __LITTLE_ENDIAN__
  19002. __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
  19003. poly8x16_t __ret;
  19004. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  19005. return __ret;
  19006. }
  19007. #else
  19008. __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
  19009. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19010. poly8x16_t __ret;
  19011. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  19012. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19013. return __ret;
  19014. }
  19015. #endif
  19016. #ifdef __LITTLE_ENDIAN__
  19017. __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
  19018. poly16x8_t __ret;
  19019. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  19020. return __ret;
  19021. }
  19022. #else
  19023. __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
  19024. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19025. poly16x8_t __ret;
  19026. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  19027. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19028. return __ret;
  19029. }
  19030. #endif
  19031. #ifdef __LITTLE_ENDIAN__
  19032. __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
  19033. uint8x16_t __ret;
  19034. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  19035. return __ret;
  19036. }
  19037. #else
  19038. __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
  19039. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19040. uint8x16_t __ret;
  19041. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  19042. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19043. return __ret;
  19044. }
  19045. #endif
  19046. #ifdef __LITTLE_ENDIAN__
  19047. __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
  19048. uint16x8_t __ret;
  19049. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  19050. return __ret;
  19051. }
  19052. #else
  19053. __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
  19054. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19055. uint16x8_t __ret;
  19056. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  19057. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19058. return __ret;
  19059. }
  19060. #endif
  19061. #ifdef __LITTLE_ENDIAN__
  19062. __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
  19063. int8x16_t __ret;
  19064. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  19065. return __ret;
  19066. }
  19067. #else
  19068. __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
  19069. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19070. int8x16_t __ret;
  19071. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
  19072. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19073. return __ret;
  19074. }
  19075. #endif
  19076. #ifdef __LITTLE_ENDIAN__
  19077. __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
  19078. int16x8_t __ret;
  19079. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
  19080. return __ret;
  19081. }
  19082. #else
  19083. __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
  19084. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19085. int16x8_t __ret;
  19086. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
  19087. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19088. return __ret;
  19089. }
  19090. #endif
  19091. #ifdef __LITTLE_ENDIAN__
  19092. __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
  19093. uint8x8_t __ret;
  19094. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  19095. return __ret;
  19096. }
  19097. #else
  19098. __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
  19099. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19100. uint8x8_t __ret;
  19101. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  19102. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19103. return __ret;
  19104. }
  19105. #endif
  19106. #ifdef __LITTLE_ENDIAN__
  19107. __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
  19108. uint16x4_t __ret;
  19109. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  19110. return __ret;
  19111. }
  19112. #else
  19113. __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
  19114. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19115. uint16x4_t __ret;
  19116. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  19117. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19118. return __ret;
  19119. }
  19120. #endif
  19121. #ifdef __LITTLE_ENDIAN__
  19122. __ai int8x8_t vrev32_s8(int8x8_t __p0) {
  19123. int8x8_t __ret;
  19124. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  19125. return __ret;
  19126. }
  19127. #else
  19128. __ai int8x8_t vrev32_s8(int8x8_t __p0) {
  19129. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19130. int8x8_t __ret;
  19131. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  19132. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19133. return __ret;
  19134. }
  19135. #endif
  19136. #ifdef __LITTLE_ENDIAN__
  19137. __ai int16x4_t vrev32_s16(int16x4_t __p0) {
  19138. int16x4_t __ret;
  19139. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  19140. return __ret;
  19141. }
  19142. #else
  19143. __ai int16x4_t vrev32_s16(int16x4_t __p0) {
  19144. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19145. int16x4_t __ret;
  19146. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  19147. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19148. return __ret;
  19149. }
  19150. #endif
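/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): vrev32_* reverses the 8-bit or 16-bit elements inside every
 * 32-bit word; for example, vrev32_u16 swaps the two halfwords of each
 * word.  The helper name is hypothetical.
 */
__ai uint16x4_t __example_vrev32_swap_halfwords(uint16x4_t __h) {
  /* {h0,h1,h2,h3} -> {h1,h0,h3,h2} */
  return vrev32_u16(__h);
}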
  19151. #ifdef __LITTLE_ENDIAN__
  19152. __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
  19153. poly8x8_t __ret;
  19154. __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19155. return __ret;
  19156. }
  19157. #else
  19158. __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
  19159. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19160. poly8x8_t __ret;
  19161. __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
  19162. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19163. return __ret;
  19164. }
  19165. #endif
  19166. #ifdef __LITTLE_ENDIAN__
  19167. __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
  19168. poly16x4_t __ret;
  19169. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19170. return __ret;
  19171. }
  19172. #else
  19173. __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
  19174. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19175. poly16x4_t __ret;
  19176. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
  19177. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19178. return __ret;
  19179. }
  19180. #endif
  19181. #ifdef __LITTLE_ENDIAN__
  19182. __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
  19183. poly8x16_t __ret;
  19184. __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
  19185. return __ret;
  19186. }
  19187. #else
  19188. __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
  19189. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19190. poly8x16_t __ret;
  19191. __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
  19192. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19193. return __ret;
  19194. }
  19195. #endif
  19196. #ifdef __LITTLE_ENDIAN__
  19197. __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
  19198. poly16x8_t __ret;
  19199. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  19200. return __ret;
  19201. }
  19202. #else
  19203. __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
  19204. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19205. poly16x8_t __ret;
  19206. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  19207. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19208. return __ret;
  19209. }
  19210. #endif
  19211. #ifdef __LITTLE_ENDIAN__
  19212. __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
  19213. uint8x16_t __ret;
  19214. __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
  19215. return __ret;
  19216. }
  19217. #else
  19218. __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
  19219. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19220. uint8x16_t __ret;
  19221. __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
  19222. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19223. return __ret;
  19224. }
  19225. #endif
  19226. #ifdef __LITTLE_ENDIAN__
  19227. __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
  19228. uint32x4_t __ret;
  19229. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  19230. return __ret;
  19231. }
  19232. #else
  19233. __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
  19234. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19235. uint32x4_t __ret;
  19236. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  19237. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19238. return __ret;
  19239. }
  19240. #endif
  19241. #ifdef __LITTLE_ENDIAN__
  19242. __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
  19243. uint16x8_t __ret;
  19244. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  19245. return __ret;
  19246. }
  19247. #else
  19248. __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
  19249. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19250. uint16x8_t __ret;
  19251. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  19252. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19253. return __ret;
  19254. }
  19255. #endif
  19256. #ifdef __LITTLE_ENDIAN__
  19257. __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
  19258. int8x16_t __ret;
  19259. __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
  19260. return __ret;
  19261. }
  19262. #else
  19263. __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
  19264. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19265. int8x16_t __ret;
  19266. __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
  19267. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19268. return __ret;
  19269. }
  19270. #endif
  19271. #ifdef __LITTLE_ENDIAN__
  19272. __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
  19273. float32x4_t __ret;
  19274. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  19275. return __ret;
  19276. }
  19277. #else
  19278. __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
  19279. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19280. float32x4_t __ret;
  19281. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  19282. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19283. return __ret;
  19284. }
  19285. #endif
  19286. #ifdef __LITTLE_ENDIAN__
  19287. __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
  19288. int32x4_t __ret;
  19289. __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
  19290. return __ret;
  19291. }
  19292. #else
  19293. __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
  19294. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19295. int32x4_t __ret;
  19296. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
  19297. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19298. return __ret;
  19299. }
  19300. #endif
  19301. #ifdef __LITTLE_ENDIAN__
  19302. __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
  19303. int16x8_t __ret;
  19304. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  19305. return __ret;
  19306. }
  19307. #else
  19308. __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
  19309. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19310. int16x8_t __ret;
  19311. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  19312. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19313. return __ret;
  19314. }
  19315. #endif
  19316. #ifdef __LITTLE_ENDIAN__
  19317. __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
  19318. uint8x8_t __ret;
  19319. __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19320. return __ret;
  19321. }
  19322. #else
  19323. __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
  19324. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19325. uint8x8_t __ret;
  19326. __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
  19327. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19328. return __ret;
  19329. }
  19330. #endif
  19331. #ifdef __LITTLE_ENDIAN__
  19332. __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
  19333. uint32x2_t __ret;
  19334. __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
  19335. return __ret;
  19336. }
  19337. #else
  19338. __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
  19339. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19340. uint32x2_t __ret;
  19341. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
  19342. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19343. return __ret;
  19344. }
  19345. #endif
  19346. #ifdef __LITTLE_ENDIAN__
  19347. __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
  19348. uint16x4_t __ret;
  19349. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19350. return __ret;
  19351. }
  19352. #else
  19353. __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
  19354. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19355. uint16x4_t __ret;
  19356. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
  19357. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19358. return __ret;
  19359. }
  19360. #endif
  19361. #ifdef __LITTLE_ENDIAN__
  19362. __ai int8x8_t vrev64_s8(int8x8_t __p0) {
  19363. int8x8_t __ret;
  19364. __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19365. return __ret;
  19366. }
  19367. #else
  19368. __ai int8x8_t vrev64_s8(int8x8_t __p0) {
  19369. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19370. int8x8_t __ret;
  19371. __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
  19372. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19373. return __ret;
  19374. }
  19375. #endif
  19376. #ifdef __LITTLE_ENDIAN__
  19377. __ai float32x2_t vrev64_f32(float32x2_t __p0) {
  19378. float32x2_t __ret;
  19379. __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
  19380. return __ret;
  19381. }
  19382. #else
  19383. __ai float32x2_t vrev64_f32(float32x2_t __p0) {
  19384. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19385. float32x2_t __ret;
  19386. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
  19387. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19388. return __ret;
  19389. }
  19390. #endif
  19391. #ifdef __LITTLE_ENDIAN__
  19392. __ai int32x2_t vrev64_s32(int32x2_t __p0) {
  19393. int32x2_t __ret;
  19394. __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
  19395. return __ret;
  19396. }
  19397. #else
  19398. __ai int32x2_t vrev64_s32(int32x2_t __p0) {
  19399. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19400. int32x2_t __ret;
  19401. __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
  19402. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19403. return __ret;
  19404. }
  19405. #endif
  19406. #ifdef __LITTLE_ENDIAN__
  19407. __ai int16x4_t vrev64_s16(int16x4_t __p0) {
  19408. int16x4_t __ret;
  19409. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19410. return __ret;
  19411. }
  19412. #else
  19413. __ai int16x4_t vrev64_s16(int16x4_t __p0) {
  19414. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19415. int16x4_t __ret;
  19416. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
  19417. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19418. return __ret;
  19419. }
  19420. #endif
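/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): vrev64_* reverses the elements inside every 64-bit doubleword,
 * so on a 64-bit D register it reverses the whole vector; vrev64_f32 simply
 * swaps the two float lanes.  The helper name is hypothetical.
 */
__ai float32x2_t __example_vrev64_swap_lanes(float32x2_t __v) {
  /* {v0,v1} -> {v1,v0} */
  return vrev64_f32(__v);
}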
  19421. #ifdef __LITTLE_ENDIAN__
  19422. __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  19423. uint8x16_t __ret;
  19424. __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  19425. return __ret;
  19426. }
  19427. #else
  19428. __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  19429. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19430. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19431. uint8x16_t __ret;
  19432. __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  19433. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19434. return __ret;
  19435. }
  19436. #endif
  19437. #ifdef __LITTLE_ENDIAN__
  19438. __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  19439. uint32x4_t __ret;
  19440. __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  19441. return __ret;
  19442. }
  19443. #else
  19444. __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  19445. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19446. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  19447. uint32x4_t __ret;
  19448. __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  19449. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19450. return __ret;
  19451. }
  19452. #endif
  19453. #ifdef __LITTLE_ENDIAN__
  19454. __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  19455. uint16x8_t __ret;
  19456. __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  19457. return __ret;
  19458. }
  19459. #else
  19460. __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  19461. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19462. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  19463. uint16x8_t __ret;
  19464. __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  19465. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19466. return __ret;
  19467. }
  19468. #endif
  19469. #ifdef __LITTLE_ENDIAN__
  19470. __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  19471. int8x16_t __ret;
  19472. __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  19473. return __ret;
  19474. }
  19475. #else
  19476. __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  19477. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19478. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19479. int8x16_t __ret;
  19480. __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  19481. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19482. return __ret;
  19483. }
  19484. #endif
  19485. #ifdef __LITTLE_ENDIAN__
  19486. __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  19487. int32x4_t __ret;
  19488. __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  19489. return __ret;
  19490. }
  19491. #else
  19492. __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  19493. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19494. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  19495. int32x4_t __ret;
  19496. __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  19497. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19498. return __ret;
  19499. }
  19500. #endif
  19501. #ifdef __LITTLE_ENDIAN__
  19502. __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  19503. int16x8_t __ret;
  19504. __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  19505. return __ret;
  19506. }
  19507. #else
  19508. __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  19509. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19510. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  19511. int16x8_t __ret;
  19512. __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  19513. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19514. return __ret;
  19515. }
  19516. #endif
  19517. #ifdef __LITTLE_ENDIAN__
  19518. __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  19519. uint8x8_t __ret;
  19520. __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  19521. return __ret;
  19522. }
  19523. #else
  19524. __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  19525. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19526. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  19527. uint8x8_t __ret;
  19528. __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  19529. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19530. return __ret;
  19531. }
  19532. #endif
  19533. #ifdef __LITTLE_ENDIAN__
  19534. __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  19535. uint32x2_t __ret;
  19536. __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  19537. return __ret;
  19538. }
  19539. #else
  19540. __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  19541. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19542. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  19543. uint32x2_t __ret;
  19544. __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  19545. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19546. return __ret;
  19547. }
  19548. #endif
  19549. #ifdef __LITTLE_ENDIAN__
  19550. __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  19551. uint16x4_t __ret;
  19552. __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  19553. return __ret;
  19554. }
  19555. #else
  19556. __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  19557. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19558. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  19559. uint16x4_t __ret;
  19560. __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  19561. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19562. return __ret;
  19563. }
  19564. #endif
  19565. #ifdef __LITTLE_ENDIAN__
  19566. __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
  19567. int8x8_t __ret;
  19568. __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  19569. return __ret;
  19570. }
  19571. #else
  19572. __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
  19573. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19574. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  19575. int8x8_t __ret;
  19576. __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  19577. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19578. return __ret;
  19579. }
  19580. #endif
  19581. #ifdef __LITTLE_ENDIAN__
  19582. __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
  19583. int32x2_t __ret;
  19584. __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  19585. return __ret;
  19586. }
  19587. #else
  19588. __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
  19589. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19590. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  19591. int32x2_t __ret;
  19592. __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  19593. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19594. return __ret;
  19595. }
  19596. #endif
  19597. #ifdef __LITTLE_ENDIAN__
  19598. __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
  19599. int16x4_t __ret;
  19600. __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  19601. return __ret;
  19602. }
  19603. #else
  19604. __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
  19605. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19606. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  19607. int16x4_t __ret;
  19608. __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  19609. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19610. return __ret;
  19611. }
  19612. #endif
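/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): vrhadd/vrhaddq compute the lane-wise rounded average
 * (a + b + 1) >> 1 in a widened intermediate, so the sum cannot overflow;
 * this is a common building block for pixel averaging.  The helper name is
 * hypothetical.
 */
__ai uint8x16_t __example_vrhadd_average(uint8x16_t __a, uint8x16_t __b) {
  /* Per-lane (__a + __b + 1) / 2 without overflow. */
  return vrhaddq_u8(__a, __b);
}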
  19613. #ifdef __LITTLE_ENDIAN__
  19614. __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  19615. uint8x16_t __ret;
  19616. __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  19617. return __ret;
  19618. }
  19619. #else
  19620. __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  19621. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19622. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19623. uint8x16_t __ret;
  19624. __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  19625. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19626. return __ret;
  19627. }
  19628. #endif
  19629. #ifdef __LITTLE_ENDIAN__
  19630. __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  19631. uint32x4_t __ret;
  19632. __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  19633. return __ret;
  19634. }
  19635. #else
  19636. __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  19637. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19638. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  19639. uint32x4_t __ret;
  19640. __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  19641. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19642. return __ret;
  19643. }
  19644. #endif
  19645. #ifdef __LITTLE_ENDIAN__
  19646. __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  19647. uint64x2_t __ret;
  19648. __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  19649. return __ret;
  19650. }
  19651. #else
  19652. __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  19653. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19654. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  19655. uint64x2_t __ret;
  19656. __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  19657. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19658. return __ret;
  19659. }
  19660. #endif
  19661. #ifdef __LITTLE_ENDIAN__
  19662. __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  19663. uint16x8_t __ret;
  19664. __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  19665. return __ret;
  19666. }
  19667. #else
  19668. __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  19669. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19670. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  19671. uint16x8_t __ret;
  19672. __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  19673. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19674. return __ret;
  19675. }
  19676. #endif
  19677. #ifdef __LITTLE_ENDIAN__
  19678. __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  19679. int8x16_t __ret;
  19680. __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  19681. return __ret;
  19682. }
  19683. #else
  19684. __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  19685. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19686. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19687. int8x16_t __ret;
  19688. __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  19689. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  19690. return __ret;
  19691. }
  19692. #endif
  19693. #ifdef __LITTLE_ENDIAN__
  19694. __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  19695. int32x4_t __ret;
  19696. __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  19697. return __ret;
  19698. }
  19699. #else
  19700. __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  19701. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19702. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  19703. int32x4_t __ret;
  19704. __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  19705. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19706. return __ret;
  19707. }
  19708. #endif
  19709. #ifdef __LITTLE_ENDIAN__
  19710. __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  19711. int64x2_t __ret;
  19712. __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  19713. return __ret;
  19714. }
  19715. #else
  19716. __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  19717. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19718. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  19719. int64x2_t __ret;
  19720. __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  19721. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19722. return __ret;
  19723. }
  19724. #endif
  19725. #ifdef __LITTLE_ENDIAN__
  19726. __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  19727. int16x8_t __ret;
  19728. __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  19729. return __ret;
  19730. }
  19731. #else
  19732. __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  19733. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19734. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  19735. int16x8_t __ret;
  19736. __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  19737. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19738. return __ret;
  19739. }
  19740. #endif
  19741. #ifdef __LITTLE_ENDIAN__
  19742. __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  19743. uint8x8_t __ret;
  19744. __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  19745. return __ret;
  19746. }
  19747. #else
  19748. __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  19749. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19750. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  19751. uint8x8_t __ret;
  19752. __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  19753. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19754. return __ret;
  19755. }
  19756. #endif
  19757. #ifdef __LITTLE_ENDIAN__
  19758. __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  19759. uint32x2_t __ret;
  19760. __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  19761. return __ret;
  19762. }
  19763. #else
  19764. __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  19765. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19766. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  19767. uint32x2_t __ret;
  19768. __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  19769. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19770. return __ret;
  19771. }
  19772. #endif
  19773. #ifdef __LITTLE_ENDIAN__
  19774. __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  19775. uint64x1_t __ret;
  19776. __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  19777. return __ret;
  19778. }
  19779. #else
  19780. __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  19781. uint64x1_t __ret;
  19782. __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  19783. return __ret;
  19784. }
  19785. #endif
  19786. #ifdef __LITTLE_ENDIAN__
  19787. __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  19788. uint16x4_t __ret;
  19789. __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  19790. return __ret;
  19791. }
  19792. #else
  19793. __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  19794. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19795. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  19796. uint16x4_t __ret;
  19797. __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  19798. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19799. return __ret;
  19800. }
  19801. #endif
  19802. #ifdef __LITTLE_ENDIAN__
  19803. __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
  19804. int8x8_t __ret;
  19805. __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  19806. return __ret;
  19807. }
  19808. #else
  19809. __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
  19810. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  19811. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  19812. int8x8_t __ret;
  19813. __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  19814. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  19815. return __ret;
  19816. }
  19817. #endif
  19818. #ifdef __LITTLE_ENDIAN__
  19819. __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
  19820. int32x2_t __ret;
  19821. __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  19822. return __ret;
  19823. }
  19824. #else
  19825. __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
  19826. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  19827. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  19828. int32x2_t __ret;
  19829. __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  19830. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  19831. return __ret;
  19832. }
  19833. #endif
  19834. #ifdef __LITTLE_ENDIAN__
  19835. __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
  19836. int64x1_t __ret;
  19837. __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  19838. return __ret;
  19839. }
  19840. #else
  19841. __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
  19842. int64x1_t __ret;
  19843. __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  19844. return __ret;
  19845. }
  19846. #endif
  19847. #ifdef __LITTLE_ENDIAN__
  19848. __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
  19849. int16x4_t __ret;
  19850. __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  19851. return __ret;
  19852. }
  19853. #else
  19854. __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
  19855. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  19856. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  19857. int16x4_t __ret;
  19858. __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  19859. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  19860. return __ret;
  19861. }
  19862. #endif
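/* The vrshl/vrshlq family above implements rounding shift left by a signed, per-lane
 * count: positive counts shift left, negative counts shift right with rounding (half of
 * the discarded value is added before the shift).  Illustrative sketch only, not part of
 * the generated header (values are made up; vdup_n_s32 is defined elsewhere in this header):
 *
 *   int32x2_t v  = vdup_n_s32(5);
 *   int32x2_t sh = vdup_n_s32(-1);      // negative count => rounding shift right by 1
 *   int32x2_t r  = vrshl_s32(v, sh);    // each lane: (5 + 1) >> 1 == 3
 */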
  19863. #ifdef __LITTLE_ENDIAN__
  19864. #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
  19865. uint8x16_t __s0 = __p0; \
  19866. uint8x16_t __ret; \
  19867. __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
  19868. __ret; \
  19869. })
  19870. #else
  19871. #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
  19872. uint8x16_t __s0 = __p0; \
  19873. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  19874. uint8x16_t __ret; \
  19875. __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
  19876. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  19877. __ret; \
  19878. })
  19879. #endif
  19880. #ifdef __LITTLE_ENDIAN__
  19881. #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
  19882. uint32x4_t __s0 = __p0; \
  19883. uint32x4_t __ret; \
  19884. __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
  19885. __ret; \
  19886. })
  19887. #else
  19888. #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
  19889. uint32x4_t __s0 = __p0; \
  19890. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  19891. uint32x4_t __ret; \
  19892. __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
  19893. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  19894. __ret; \
  19895. })
  19896. #endif
  19897. #ifdef __LITTLE_ENDIAN__
  19898. #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
  19899. uint64x2_t __s0 = __p0; \
  19900. uint64x2_t __ret; \
  19901. __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
  19902. __ret; \
  19903. })
  19904. #else
  19905. #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
  19906. uint64x2_t __s0 = __p0; \
  19907. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  19908. uint64x2_t __ret; \
  19909. __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
  19910. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  19911. __ret; \
  19912. })
  19913. #endif
  19914. #ifdef __LITTLE_ENDIAN__
  19915. #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
  19916. uint16x8_t __s0 = __p0; \
  19917. uint16x8_t __ret; \
  19918. __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
  19919. __ret; \
  19920. })
  19921. #else
  19922. #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
  19923. uint16x8_t __s0 = __p0; \
  19924. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  19925. uint16x8_t __ret; \
  19926. __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
  19927. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  19928. __ret; \
  19929. })
  19930. #endif
  19931. #ifdef __LITTLE_ENDIAN__
  19932. #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
  19933. int8x16_t __s0 = __p0; \
  19934. int8x16_t __ret; \
  19935. __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
  19936. __ret; \
  19937. })
  19938. #else
  19939. #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
  19940. int8x16_t __s0 = __p0; \
  19941. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  19942. int8x16_t __ret; \
  19943. __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
  19944. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  19945. __ret; \
  19946. })
  19947. #endif
  19948. #ifdef __LITTLE_ENDIAN__
  19949. #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
  19950. int32x4_t __s0 = __p0; \
  19951. int32x4_t __ret; \
  19952. __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
  19953. __ret; \
  19954. })
  19955. #else
  19956. #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
  19957. int32x4_t __s0 = __p0; \
  19958. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  19959. int32x4_t __ret; \
  19960. __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
  19961. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  19962. __ret; \
  19963. })
  19964. #endif
  19965. #ifdef __LITTLE_ENDIAN__
  19966. #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
  19967. int64x2_t __s0 = __p0; \
  19968. int64x2_t __ret; \
  19969. __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
  19970. __ret; \
  19971. })
  19972. #else
  19973. #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
  19974. int64x2_t __s0 = __p0; \
  19975. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  19976. int64x2_t __ret; \
  19977. __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
  19978. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  19979. __ret; \
  19980. })
  19981. #endif
  19982. #ifdef __LITTLE_ENDIAN__
  19983. #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
  19984. int16x8_t __s0 = __p0; \
  19985. int16x8_t __ret; \
  19986. __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
  19987. __ret; \
  19988. })
  19989. #else
  19990. #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
  19991. int16x8_t __s0 = __p0; \
  19992. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  19993. int16x8_t __ret; \
  19994. __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
  19995. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  19996. __ret; \
  19997. })
  19998. #endif
  19999. #ifdef __LITTLE_ENDIAN__
  20000. #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
  20001. uint8x8_t __s0 = __p0; \
  20002. uint8x8_t __ret; \
  20003. __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
  20004. __ret; \
  20005. })
  20006. #else
  20007. #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
  20008. uint8x8_t __s0 = __p0; \
  20009. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  20010. uint8x8_t __ret; \
  20011. __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
  20012. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20013. __ret; \
  20014. })
  20015. #endif
  20016. #ifdef __LITTLE_ENDIAN__
  20017. #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
  20018. uint32x2_t __s0 = __p0; \
  20019. uint32x2_t __ret; \
  20020. __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
  20021. __ret; \
  20022. })
  20023. #else
  20024. #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
  20025. uint32x2_t __s0 = __p0; \
  20026. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  20027. uint32x2_t __ret; \
  20028. __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
  20029. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20030. __ret; \
  20031. })
  20032. #endif
  20033. #ifdef __LITTLE_ENDIAN__
  20034. #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
  20035. uint64x1_t __s0 = __p0; \
  20036. uint64x1_t __ret; \
  20037. __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
  20038. __ret; \
  20039. })
  20040. #else
  20041. #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
  20042. uint64x1_t __s0 = __p0; \
  20043. uint64x1_t __ret; \
  20044. __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
  20045. __ret; \
  20046. })
  20047. #endif
  20048. #ifdef __LITTLE_ENDIAN__
  20049. #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
  20050. uint16x4_t __s0 = __p0; \
  20051. uint16x4_t __ret; \
  20052. __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
  20053. __ret; \
  20054. })
  20055. #else
  20056. #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
  20057. uint16x4_t __s0 = __p0; \
  20058. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  20059. uint16x4_t __ret; \
  20060. __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
  20061. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20062. __ret; \
  20063. })
  20064. #endif
  20065. #ifdef __LITTLE_ENDIAN__
  20066. #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
  20067. int8x8_t __s0 = __p0; \
  20068. int8x8_t __ret; \
  20069. __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
  20070. __ret; \
  20071. })
  20072. #else
  20073. #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
  20074. int8x8_t __s0 = __p0; \
  20075. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  20076. int8x8_t __ret; \
  20077. __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
  20078. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20079. __ret; \
  20080. })
  20081. #endif
  20082. #ifdef __LITTLE_ENDIAN__
  20083. #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
  20084. int32x2_t __s0 = __p0; \
  20085. int32x2_t __ret; \
  20086. __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
  20087. __ret; \
  20088. })
  20089. #else
  20090. #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
  20091. int32x2_t __s0 = __p0; \
  20092. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  20093. int32x2_t __ret; \
  20094. __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
  20095. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20096. __ret; \
  20097. })
  20098. #endif
  20099. #ifdef __LITTLE_ENDIAN__
  20100. #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
  20101. int64x1_t __s0 = __p0; \
  20102. int64x1_t __ret; \
  20103. __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
  20104. __ret; \
  20105. })
  20106. #else
  20107. #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
  20108. int64x1_t __s0 = __p0; \
  20109. int64x1_t __ret; \
  20110. __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
  20111. __ret; \
  20112. })
  20113. #endif
  20114. #ifdef __LITTLE_ENDIAN__
  20115. #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
  20116. int16x4_t __s0 = __p0; \
  20117. int16x4_t __ret; \
  20118. __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
  20119. __ret; \
  20120. })
  20121. #else
  20122. #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
  20123. int16x4_t __s0 = __p0; \
  20124. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  20125. int16x4_t __ret; \
  20126. __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
  20127. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20128. __ret; \
  20129. })
  20130. #endif
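/* The vrshr_n/vrshrq_n macros above implement rounding shift right by an immediate in
 * the range 1..element-width: 1 << (n - 1) is added before shifting, so the result is
 * rounded rather than simply truncated.  Illustrative sketch only, not part of the
 * generated header (values are made up):
 *
 *   int32x2_t v = vdup_n_s32(5);
 *   int32x2_t r = vrshr_n_s32(v, 1);    // each lane: (5 + 1) >> 1 == 3
 */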
  20131. #ifdef __LITTLE_ENDIAN__
  20132. #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
  20133. uint32x4_t __s0 = __p0; \
  20134. uint16x4_t __ret; \
  20135. __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
  20136. __ret; \
  20137. })
  20138. #else
  20139. #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
  20140. uint32x4_t __s0 = __p0; \
  20141. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  20142. uint16x4_t __ret; \
  20143. __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
  20144. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20145. __ret; \
  20146. })
  20147. #define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
  20148. uint32x4_t __s0 = __p0; \
  20149. uint16x4_t __ret; \
  20150. __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
  20151. __ret; \
  20152. })
  20153. #endif
  20154. #ifdef __LITTLE_ENDIAN__
  20155. #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
  20156. uint64x2_t __s0 = __p0; \
  20157. uint32x2_t __ret; \
  20158. __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
  20159. __ret; \
  20160. })
  20161. #else
  20162. #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
  20163. uint64x2_t __s0 = __p0; \
  20164. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  20165. uint32x2_t __ret; \
  20166. __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
  20167. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20168. __ret; \
  20169. })
  20170. #define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
  20171. uint64x2_t __s0 = __p0; \
  20172. uint32x2_t __ret; \
  20173. __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
  20174. __ret; \
  20175. })
  20176. #endif
  20177. #ifdef __LITTLE_ENDIAN__
  20178. #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
  20179. uint16x8_t __s0 = __p0; \
  20180. uint8x8_t __ret; \
  20181. __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
  20182. __ret; \
  20183. })
  20184. #else
  20185. #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
  20186. uint16x8_t __s0 = __p0; \
  20187. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  20188. uint8x8_t __ret; \
  20189. __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
  20190. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20191. __ret; \
  20192. })
  20193. #define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
  20194. uint16x8_t __s0 = __p0; \
  20195. uint8x8_t __ret; \
  20196. __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
  20197. __ret; \
  20198. })
  20199. #endif
  20200. #ifdef __LITTLE_ENDIAN__
  20201. #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
  20202. int32x4_t __s0 = __p0; \
  20203. int16x4_t __ret; \
  20204. __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
  20205. __ret; \
  20206. })
  20207. #else
  20208. #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
  20209. int32x4_t __s0 = __p0; \
  20210. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  20211. int16x4_t __ret; \
  20212. __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
  20213. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20214. __ret; \
  20215. })
  20216. #define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
  20217. int32x4_t __s0 = __p0; \
  20218. int16x4_t __ret; \
  20219. __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
  20220. __ret; \
  20221. })
  20222. #endif
  20223. #ifdef __LITTLE_ENDIAN__
  20224. #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
  20225. int64x2_t __s0 = __p0; \
  20226. int32x2_t __ret; \
  20227. __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
  20228. __ret; \
  20229. })
  20230. #else
  20231. #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
  20232. int64x2_t __s0 = __p0; \
  20233. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  20234. int32x2_t __ret; \
  20235. __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
  20236. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20237. __ret; \
  20238. })
  20239. #define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
  20240. int64x2_t __s0 = __p0; \
  20241. int32x2_t __ret; \
  20242. __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
  20243. __ret; \
  20244. })
  20245. #endif
  20246. #ifdef __LITTLE_ENDIAN__
  20247. #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
  20248. int16x8_t __s0 = __p0; \
  20249. int8x8_t __ret; \
  20250. __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
  20251. __ret; \
  20252. })
  20253. #else
  20254. #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
  20255. int16x8_t __s0 = __p0; \
  20256. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  20257. int8x8_t __ret; \
  20258. __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
  20259. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20260. __ret; \
  20261. })
  20262. #define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
  20263. int16x8_t __s0 = __p0; \
  20264. int8x8_t __ret; \
  20265. __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
  20266. __ret; \
  20267. })
  20268. #endif
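/* The vrshrn_n macros above implement rounding shift right by an immediate followed by
 * narrowing to elements of half the width (e.g. uint32x4_t -> uint16x4_t).  Illustrative
 * sketch only, not part of the generated header (values are made up; vdupq_n_u32 is
 * defined elsewhere in this header):
 *
 *   uint32x4_t wide = vdupq_n_u32(0x12348000u);
 *   uint16x4_t r    = vrshrn_n_u32(wide, 16);  // each lane: (0x12348000 + 0x8000) >> 16 == 0x1235
 */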
  20269. #ifdef __LITTLE_ENDIAN__
  20270. __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
  20271. uint32x4_t __ret;
  20272. __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
  20273. return __ret;
  20274. }
  20275. #else
  20276. __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
  20277. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  20278. uint32x4_t __ret;
  20279. __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
  20280. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  20281. return __ret;
  20282. }
  20283. #endif
  20284. #ifdef __LITTLE_ENDIAN__
  20285. __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
  20286. float32x4_t __ret;
  20287. __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
  20288. return __ret;
  20289. }
  20290. #else
  20291. __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
  20292. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  20293. float32x4_t __ret;
  20294. __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
  20295. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  20296. return __ret;
  20297. }
  20298. #endif
  20299. #ifdef __LITTLE_ENDIAN__
  20300. __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
  20301. uint32x2_t __ret;
  20302. __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
  20303. return __ret;
  20304. }
  20305. #else
  20306. __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
  20307. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  20308. uint32x2_t __ret;
  20309. __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
  20310. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  20311. return __ret;
  20312. }
  20313. #endif
  20314. #ifdef __LITTLE_ENDIAN__
  20315. __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
  20316. float32x2_t __ret;
  20317. __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
  20318. return __ret;
  20319. }
  20320. #else
  20321. __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
  20322. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  20323. float32x2_t __ret;
  20324. __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
  20325. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  20326. return __ret;
  20327. }
  20328. #endif
  20329. #ifdef __LITTLE_ENDIAN__
  20330. __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
  20331. float32x4_t __ret;
  20332. __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  20333. return __ret;
  20334. }
  20335. #else
  20336. __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
  20337. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  20338. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  20339. float32x4_t __ret;
  20340. __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  20341. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  20342. return __ret;
  20343. }
  20344. #endif
  20345. #ifdef __LITTLE_ENDIAN__
  20346. __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
  20347. float32x2_t __ret;
  20348. __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  20349. return __ret;
  20350. }
  20351. #else
  20352. __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
  20353. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  20354. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  20355. float32x2_t __ret;
  20356. __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  20357. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  20358. return __ret;
  20359. }
  20360. #endif
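/* vrsqrte/vrsqrteq above produce a low-precision estimate of 1/sqrt(x) (roughly 8 bits
 * for the float variants), and vrsqrts/vrsqrtsq compute the Newton-Raphson step
 * (3 - a*b) / 2.  A common idiom combines them to refine the estimate; illustrative
 * sketch only, not part of the generated header (vdupq_n_f32 and vmulq_f32 are defined
 * elsewhere in this header):
 *
 *   float32x4_t x = vdupq_n_f32(2.0f);
 *   float32x4_t e = vrsqrteq_f32(x);                        // rough 1/sqrt(x)
 *   e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));     // one refinement step
 *   e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));     // ~full float precision
 */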
  20361. #ifdef __LITTLE_ENDIAN__
  20362. #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
  20363. uint8x16_t __s0 = __p0; \
  20364. uint8x16_t __s1 = __p1; \
  20365. uint8x16_t __ret; \
  20366. __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
  20367. __ret; \
  20368. })
  20369. #else
  20370. #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
  20371. uint8x16_t __s0 = __p0; \
  20372. uint8x16_t __s1 = __p1; \
  20373. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20374. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20375. uint8x16_t __ret; \
  20376. __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
  20377. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20378. __ret; \
  20379. })
  20380. #endif
  20381. #ifdef __LITTLE_ENDIAN__
  20382. #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
  20383. uint32x4_t __s0 = __p0; \
  20384. uint32x4_t __s1 = __p1; \
  20385. uint32x4_t __ret; \
  20386. __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
  20387. __ret; \
  20388. })
  20389. #else
  20390. #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
  20391. uint32x4_t __s0 = __p0; \
  20392. uint32x4_t __s1 = __p1; \
  20393. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  20394. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  20395. uint32x4_t __ret; \
  20396. __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
  20397. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20398. __ret; \
  20399. })
  20400. #endif
  20401. #ifdef __LITTLE_ENDIAN__
  20402. #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
  20403. uint64x2_t __s0 = __p0; \
  20404. uint64x2_t __s1 = __p1; \
  20405. uint64x2_t __ret; \
  20406. __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
  20407. __ret; \
  20408. })
  20409. #else
  20410. #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
  20411. uint64x2_t __s0 = __p0; \
  20412. uint64x2_t __s1 = __p1; \
  20413. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  20414. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  20415. uint64x2_t __ret; \
  20416. __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
  20417. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20418. __ret; \
  20419. })
  20420. #endif
  20421. #ifdef __LITTLE_ENDIAN__
  20422. #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
  20423. uint16x8_t __s0 = __p0; \
  20424. uint16x8_t __s1 = __p1; \
  20425. uint16x8_t __ret; \
  20426. __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
  20427. __ret; \
  20428. })
  20429. #else
  20430. #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
  20431. uint16x8_t __s0 = __p0; \
  20432. uint16x8_t __s1 = __p1; \
  20433. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  20434. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  20435. uint16x8_t __ret; \
  20436. __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
  20437. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20438. __ret; \
  20439. })
  20440. #endif
  20441. #ifdef __LITTLE_ENDIAN__
  20442. #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
  20443. int8x16_t __s0 = __p0; \
  20444. int8x16_t __s1 = __p1; \
  20445. int8x16_t __ret; \
  20446. __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
  20447. __ret; \
  20448. })
  20449. #else
  20450. #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
  20451. int8x16_t __s0 = __p0; \
  20452. int8x16_t __s1 = __p1; \
  20453. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20454. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20455. int8x16_t __ret; \
  20456. __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
  20457. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20458. __ret; \
  20459. })
  20460. #endif
  20461. #ifdef __LITTLE_ENDIAN__
  20462. #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
  20463. int32x4_t __s0 = __p0; \
  20464. int32x4_t __s1 = __p1; \
  20465. int32x4_t __ret; \
  20466. __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
  20467. __ret; \
  20468. })
  20469. #else
  20470. #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
  20471. int32x4_t __s0 = __p0; \
  20472. int32x4_t __s1 = __p1; \
  20473. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  20474. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  20475. int32x4_t __ret; \
  20476. __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
  20477. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20478. __ret; \
  20479. })
  20480. #endif
  20481. #ifdef __LITTLE_ENDIAN__
  20482. #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
  20483. int64x2_t __s0 = __p0; \
  20484. int64x2_t __s1 = __p1; \
  20485. int64x2_t __ret; \
  20486. __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
  20487. __ret; \
  20488. })
  20489. #else
  20490. #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
  20491. int64x2_t __s0 = __p0; \
  20492. int64x2_t __s1 = __p1; \
  20493. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  20494. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  20495. int64x2_t __ret; \
  20496. __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
  20497. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20498. __ret; \
  20499. })
  20500. #endif
  20501. #ifdef __LITTLE_ENDIAN__
  20502. #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
  20503. int16x8_t __s0 = __p0; \
  20504. int16x8_t __s1 = __p1; \
  20505. int16x8_t __ret; \
  20506. __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
  20507. __ret; \
  20508. })
  20509. #else
  20510. #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
  20511. int16x8_t __s0 = __p0; \
  20512. int16x8_t __s1 = __p1; \
  20513. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  20514. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  20515. int16x8_t __ret; \
  20516. __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
  20517. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20518. __ret; \
  20519. })
  20520. #endif
  20521. #ifdef __LITTLE_ENDIAN__
  20522. #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
  20523. uint8x8_t __s0 = __p0; \
  20524. uint8x8_t __s1 = __p1; \
  20525. uint8x8_t __ret; \
  20526. __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
  20527. __ret; \
  20528. })
  20529. #else
  20530. #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
  20531. uint8x8_t __s0 = __p0; \
  20532. uint8x8_t __s1 = __p1; \
  20533. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  20534. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  20535. uint8x8_t __ret; \
  20536. __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
  20537. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20538. __ret; \
  20539. })
  20540. #endif
  20541. #ifdef __LITTLE_ENDIAN__
  20542. #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
  20543. uint32x2_t __s0 = __p0; \
  20544. uint32x2_t __s1 = __p1; \
  20545. uint32x2_t __ret; \
  20546. __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
  20547. __ret; \
  20548. })
  20549. #else
  20550. #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
  20551. uint32x2_t __s0 = __p0; \
  20552. uint32x2_t __s1 = __p1; \
  20553. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  20554. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  20555. uint32x2_t __ret; \
  20556. __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
  20557. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20558. __ret; \
  20559. })
  20560. #endif
  20561. #ifdef __LITTLE_ENDIAN__
  20562. #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
  20563. uint64x1_t __s0 = __p0; \
  20564. uint64x1_t __s1 = __p1; \
  20565. uint64x1_t __ret; \
  20566. __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  20567. __ret; \
  20568. })
  20569. #else
  20570. #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
  20571. uint64x1_t __s0 = __p0; \
  20572. uint64x1_t __s1 = __p1; \
  20573. uint64x1_t __ret; \
  20574. __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  20575. __ret; \
  20576. })
  20577. #endif
  20578. #ifdef __LITTLE_ENDIAN__
  20579. #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
  20580. uint16x4_t __s0 = __p0; \
  20581. uint16x4_t __s1 = __p1; \
  20582. uint16x4_t __ret; \
  20583. __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
  20584. __ret; \
  20585. })
  20586. #else
  20587. #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
  20588. uint16x4_t __s0 = __p0; \
  20589. uint16x4_t __s1 = __p1; \
  20590. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  20591. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  20592. uint16x4_t __ret; \
  20593. __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
  20594. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20595. __ret; \
  20596. })
  20597. #endif
  20598. #ifdef __LITTLE_ENDIAN__
  20599. #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
  20600. int8x8_t __s0 = __p0; \
  20601. int8x8_t __s1 = __p1; \
  20602. int8x8_t __ret; \
  20603. __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
  20604. __ret; \
  20605. })
  20606. #else
  20607. #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
  20608. int8x8_t __s0 = __p0; \
  20609. int8x8_t __s1 = __p1; \
  20610. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  20611. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  20612. int8x8_t __ret; \
  20613. __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
  20614. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20615. __ret; \
  20616. })
  20617. #endif
  20618. #ifdef __LITTLE_ENDIAN__
  20619. #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
  20620. int32x2_t __s0 = __p0; \
  20621. int32x2_t __s1 = __p1; \
  20622. int32x2_t __ret; \
  20623. __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
  20624. __ret; \
  20625. })
  20626. #else
  20627. #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
  20628. int32x2_t __s0 = __p0; \
  20629. int32x2_t __s1 = __p1; \
  20630. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  20631. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  20632. int32x2_t __ret; \
  20633. __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
  20634. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20635. __ret; \
  20636. })
  20637. #endif
  20638. #ifdef __LITTLE_ENDIAN__
  20639. #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
  20640. int64x1_t __s0 = __p0; \
  20641. int64x1_t __s1 = __p1; \
  20642. int64x1_t __ret; \
  20643. __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  20644. __ret; \
  20645. })
  20646. #else
  20647. #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
  20648. int64x1_t __s0 = __p0; \
  20649. int64x1_t __s1 = __p1; \
  20650. int64x1_t __ret; \
  20651. __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  20652. __ret; \
  20653. })
  20654. #endif
  20655. #ifdef __LITTLE_ENDIAN__
  20656. #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
  20657. int16x4_t __s0 = __p0; \
  20658. int16x4_t __s1 = __p1; \
  20659. int16x4_t __ret; \
  20660. __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
  20661. __ret; \
  20662. })
  20663. #else
  20664. #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
  20665. int16x4_t __s0 = __p0; \
  20666. int16x4_t __s1 = __p1; \
  20667. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  20668. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  20669. int16x4_t __ret; \
  20670. __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
  20671. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20672. __ret; \
  20673. })
  20674. #endif
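/* The vrsra_n/vrsraq_n macros above implement rounding shift right by an immediate and
 * accumulate: the second operand is rounding-shifted right by __p2 and added to the
 * first.  Illustrative sketch only, not part of the generated header (values are made
 * up; vdup_n_u32 is defined elsewhere in this header):
 *
 *   uint32x2_t acc = vdup_n_u32(100);
 *   uint32x2_t val = vdup_n_u32(24);
 *   uint32x2_t r   = vrsra_n_u32(acc, val, 4);   // each lane: 100 + ((24 + 8) >> 4) == 102
 */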
  20675. #ifdef __LITTLE_ENDIAN__
  20676. __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  20677. uint16x4_t __ret;
  20678. __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
  20679. return __ret;
  20680. }
  20681. #else
  20682. __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  20683. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  20684. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  20685. uint16x4_t __ret;
  20686. __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
  20687. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  20688. return __ret;
  20689. }
  20690. __ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  20691. uint16x4_t __ret;
  20692. __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
  20693. return __ret;
  20694. }
  20695. #endif
  20696. #ifdef __LITTLE_ENDIAN__
  20697. __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  20698. uint32x2_t __ret;
  20699. __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
  20700. return __ret;
  20701. }
  20702. #else
  20703. __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  20704. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  20705. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  20706. uint32x2_t __ret;
  20707. __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
  20708. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  20709. return __ret;
  20710. }
  20711. __ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  20712. uint32x2_t __ret;
  20713. __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
  20714. return __ret;
  20715. }
  20716. #endif
  20717. #ifdef __LITTLE_ENDIAN__
  20718. __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  20719. uint8x8_t __ret;
  20720. __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
  20721. return __ret;
  20722. }
  20723. #else
  20724. __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  20725. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  20726. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  20727. uint8x8_t __ret;
  20728. __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
  20729. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  20730. return __ret;
  20731. }
  20732. __ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  20733. uint8x8_t __ret;
  20734. __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
  20735. return __ret;
  20736. }
  20737. #endif
  20738. #ifdef __LITTLE_ENDIAN__
  20739. __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  20740. int16x4_t __ret;
  20741. __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
  20742. return __ret;
  20743. }
  20744. #else
  20745. __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  20746. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  20747. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  20748. int16x4_t __ret;
  20749. __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
  20750. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  20751. return __ret;
  20752. }
  20753. __ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  20754. int16x4_t __ret;
  20755. __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
  20756. return __ret;
  20757. }
  20758. #endif
  20759. #ifdef __LITTLE_ENDIAN__
  20760. __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  20761. int32x2_t __ret;
  20762. __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
  20763. return __ret;
  20764. }
  20765. #else
  20766. __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  20767. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  20768. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  20769. int32x2_t __ret;
  20770. __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
  20771. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  20772. return __ret;
  20773. }
  20774. __ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  20775. int32x2_t __ret;
  20776. __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
  20777. return __ret;
  20778. }
  20779. #endif
  20780. #ifdef __LITTLE_ENDIAN__
  20781. __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  20782. int8x8_t __ret;
  20783. __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
  20784. return __ret;
  20785. }
  20786. #else
  20787. __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  20788. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  20789. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  20790. int8x8_t __ret;
  20791. __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
  20792. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  20793. return __ret;
  20794. }
  20795. __ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  20796. int8x8_t __ret;
  20797. __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
  20798. return __ret;
  20799. }
  20800. #endif
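/* The vrsubhn family above implements rounding subtract returning the narrowed high
 * half: per lane, the difference is rounded by adding 1 << (half-width - 1) and only the
 * top half of the result is kept (e.g. uint32x4_t - uint32x4_t -> uint16x4_t).
 * Illustrative sketch only, not part of the generated header (values are made up):
 *
 *   uint32x4_t a = vdupq_n_u32(0x00030000u);
 *   uint32x4_t b = vdupq_n_u32(0x00010000u);
 *   uint16x4_t r = vrsubhn_u32(a, b);   // each lane: (0x20000 + 0x8000) >> 16 == 2
 */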
  20801. #ifdef __LITTLE_ENDIAN__
  20802. #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  20803. poly8_t __s0 = __p0; \
  20804. poly8x8_t __s1 = __p1; \
  20805. poly8x8_t __ret; \
  20806. __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
  20807. __ret; \
  20808. })
  20809. #else
  20810. #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  20811. poly8_t __s0 = __p0; \
  20812. poly8x8_t __s1 = __p1; \
  20813. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  20814. poly8x8_t __ret; \
  20815. __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
  20816. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20817. __ret; \
  20818. })
  20819. #define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  20820. poly8_t __s0 = __p0; \
  20821. poly8x8_t __s1 = __p1; \
  20822. poly8x8_t __ret; \
  20823. __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
  20824. __ret; \
  20825. })
  20826. #endif
  20827. #ifdef __LITTLE_ENDIAN__
  20828. #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  20829. poly16_t __s0 = __p0; \
  20830. poly16x4_t __s1 = __p1; \
  20831. poly16x4_t __ret; \
  20832. __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
  20833. __ret; \
  20834. })
  20835. #else
  20836. #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  20837. poly16_t __s0 = __p0; \
  20838. poly16x4_t __s1 = __p1; \
  20839. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  20840. poly16x4_t __ret; \
  20841. __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
  20842. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20843. __ret; \
  20844. })
  20845. #define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  20846. poly16_t __s0 = __p0; \
  20847. poly16x4_t __s1 = __p1; \
  20848. poly16x4_t __ret; \
  20849. __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
  20850. __ret; \
  20851. })
  20852. #endif
  20853. #ifdef __LITTLE_ENDIAN__
  20854. #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  20855. poly8_t __s0 = __p0; \
  20856. poly8x16_t __s1 = __p1; \
  20857. poly8x16_t __ret; \
  20858. __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
  20859. __ret; \
  20860. })
  20861. #else
  20862. #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  20863. poly8_t __s0 = __p0; \
  20864. poly8x16_t __s1 = __p1; \
  20865. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20866. poly8x16_t __ret; \
  20867. __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
  20868. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20869. __ret; \
  20870. })
  20871. #define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  20872. poly8_t __s0 = __p0; \
  20873. poly8x16_t __s1 = __p1; \
  20874. poly8x16_t __ret; \
  20875. __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
  20876. __ret; \
  20877. })
  20878. #endif
  20879. #ifdef __LITTLE_ENDIAN__
  20880. #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  20881. poly16_t __s0 = __p0; \
  20882. poly16x8_t __s1 = __p1; \
  20883. poly16x8_t __ret; \
  20884. __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
  20885. __ret; \
  20886. })
  20887. #else
  20888. #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  20889. poly16_t __s0 = __p0; \
  20890. poly16x8_t __s1 = __p1; \
  20891. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  20892. poly16x8_t __ret; \
  20893. __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
  20894. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20895. __ret; \
  20896. })
  20897. #define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  20898. poly16_t __s0 = __p0; \
  20899. poly16x8_t __s1 = __p1; \
  20900. poly16x8_t __ret; \
  20901. __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
  20902. __ret; \
  20903. })
  20904. #endif
  20905. #ifdef __LITTLE_ENDIAN__
  20906. #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  20907. uint8_t __s0 = __p0; \
  20908. uint8x16_t __s1 = __p1; \
  20909. uint8x16_t __ret; \
  20910. __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
  20911. __ret; \
  20912. })
  20913. #else
  20914. #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  20915. uint8_t __s0 = __p0; \
  20916. uint8x16_t __s1 = __p1; \
  20917. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20918. uint8x16_t __ret; \
  20919. __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
  20920. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  20921. __ret; \
  20922. })
  20923. #define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  20924. uint8_t __s0 = __p0; \
  20925. uint8x16_t __s1 = __p1; \
  20926. uint8x16_t __ret; \
  20927. __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
  20928. __ret; \
  20929. })
  20930. #endif
  20931. #ifdef __LITTLE_ENDIAN__
  20932. #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  20933. uint32_t __s0 = __p0; \
  20934. uint32x4_t __s1 = __p1; \
  20935. uint32x4_t __ret; \
  20936. __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
  20937. __ret; \
  20938. })
  20939. #else
  20940. #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  20941. uint32_t __s0 = __p0; \
  20942. uint32x4_t __s1 = __p1; \
  20943. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  20944. uint32x4_t __ret; \
  20945. __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
  20946. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  20947. __ret; \
  20948. })
  20949. #define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  20950. uint32_t __s0 = __p0; \
  20951. uint32x4_t __s1 = __p1; \
  20952. uint32x4_t __ret; \
  20953. __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
  20954. __ret; \
  20955. })
  20956. #endif
  20957. #ifdef __LITTLE_ENDIAN__
  20958. #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  20959. uint64_t __s0 = __p0; \
  20960. uint64x2_t __s1 = __p1; \
  20961. uint64x2_t __ret; \
  20962. __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
  20963. __ret; \
  20964. })
  20965. #else
  20966. #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  20967. uint64_t __s0 = __p0; \
  20968. uint64x2_t __s1 = __p1; \
  20969. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  20970. uint64x2_t __ret; \
  20971. __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
  20972. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  20973. __ret; \
  20974. })
  20975. #define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  20976. uint64_t __s0 = __p0; \
  20977. uint64x2_t __s1 = __p1; \
  20978. uint64x2_t __ret; \
  20979. __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
  20980. __ret; \
  20981. })
  20982. #endif
  20983. #ifdef __LITTLE_ENDIAN__
  20984. #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  20985. uint16_t __s0 = __p0; \
  20986. uint16x8_t __s1 = __p1; \
  20987. uint16x8_t __ret; \
  20988. __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
  20989. __ret; \
  20990. })
  20991. #else
  20992. #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  20993. uint16_t __s0 = __p0; \
  20994. uint16x8_t __s1 = __p1; \
  20995. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  20996. uint16x8_t __ret; \
  20997. __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
  20998. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  20999. __ret; \
  21000. })
  21001. #define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  21002. uint16_t __s0 = __p0; \
  21003. uint16x8_t __s1 = __p1; \
  21004. uint16x8_t __ret; \
  21005. __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
  21006. __ret; \
  21007. })
  21008. #endif
  21009. #ifdef __LITTLE_ENDIAN__
  21010. #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  21011. int8_t __s0 = __p0; \
  21012. int8x16_t __s1 = __p1; \
  21013. int8x16_t __ret; \
  21014. __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
  21015. __ret; \
  21016. })
  21017. #else
  21018. #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  21019. int8_t __s0 = __p0; \
  21020. int8x16_t __s1 = __p1; \
  21021. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  21022. int8x16_t __ret; \
  21023. __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
  21024. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  21025. __ret; \
  21026. })
  21027. #define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  21028. int8_t __s0 = __p0; \
  21029. int8x16_t __s1 = __p1; \
  21030. int8x16_t __ret; \
  21031. __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
  21032. __ret; \
  21033. })
  21034. #endif
  21035. #ifdef __LITTLE_ENDIAN__
  21036. #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  21037. float32_t __s0 = __p0; \
  21038. float32x4_t __s1 = __p1; \
  21039. float32x4_t __ret; \
  21040. __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
  21041. __ret; \
  21042. })
  21043. #else
  21044. #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  21045. float32_t __s0 = __p0; \
  21046. float32x4_t __s1 = __p1; \
  21047. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  21048. float32x4_t __ret; \
  21049. __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
  21050. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21051. __ret; \
  21052. })
  21053. #define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  21054. float32_t __s0 = __p0; \
  21055. float32x4_t __s1 = __p1; \
  21056. float32x4_t __ret; \
  21057. __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
  21058. __ret; \
  21059. })
  21060. #endif
  21061. #ifdef __LITTLE_ENDIAN__
  21062. #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  21063. int32_t __s0 = __p0; \
  21064. int32x4_t __s1 = __p1; \
  21065. int32x4_t __ret; \
  21066. __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
  21067. __ret; \
  21068. })
  21069. #else
  21070. #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  21071. int32_t __s0 = __p0; \
  21072. int32x4_t __s1 = __p1; \
  21073. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  21074. int32x4_t __ret; \
  21075. __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
  21076. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21077. __ret; \
  21078. })
  21079. #define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  21080. int32_t __s0 = __p0; \
  21081. int32x4_t __s1 = __p1; \
  21082. int32x4_t __ret; \
  21083. __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
  21084. __ret; \
  21085. })
  21086. #endif
  21087. #ifdef __LITTLE_ENDIAN__
  21088. #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  21089. int64_t __s0 = __p0; \
  21090. int64x2_t __s1 = __p1; \
  21091. int64x2_t __ret; \
  21092. __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
  21093. __ret; \
  21094. })
  21095. #else
  21096. #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  21097. int64_t __s0 = __p0; \
  21098. int64x2_t __s1 = __p1; \
  21099. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  21100. int64x2_t __ret; \
  21101. __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
  21102. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21103. __ret; \
  21104. })
  21105. #define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  21106. int64_t __s0 = __p0; \
  21107. int64x2_t __s1 = __p1; \
  21108. int64x2_t __ret; \
  21109. __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
  21110. __ret; \
  21111. })
  21112. #endif
  21113. #ifdef __LITTLE_ENDIAN__
  21114. #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  21115. int16_t __s0 = __p0; \
  21116. int16x8_t __s1 = __p1; \
  21117. int16x8_t __ret; \
  21118. __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
  21119. __ret; \
  21120. })
  21121. #else
  21122. #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  21123. int16_t __s0 = __p0; \
  21124. int16x8_t __s1 = __p1; \
  21125. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  21126. int16x8_t __ret; \
  21127. __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
  21128. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21129. __ret; \
  21130. })
  21131. #define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  21132. int16_t __s0 = __p0; \
  21133. int16x8_t __s1 = __p1; \
  21134. int16x8_t __ret; \
  21135. __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
  21136. __ret; \
  21137. })
  21138. #endif
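/* The vsetq_lane_ macros above, and the vset_lane_ macros that follow,
 * return a copy of a vector with one lane replaced by a scalar; the lane
 * index must be a compile-time constant.  A minimal usage sketch, assuming
 * a NEON-enabled target (variable names are only illustrative):
 *
 *   uint16x8_t v = vdupq_n_u16(0);          // all lanes zero
 *   uint16x8_t r = vsetq_lane_u16(7, v, 3); // lane 3 becomes 7, others stay 0
 */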
  21139. #ifdef __LITTLE_ENDIAN__
  21140. #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  21141. uint8_t __s0 = __p0; \
  21142. uint8x8_t __s1 = __p1; \
  21143. uint8x8_t __ret; \
  21144. __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
  21145. __ret; \
  21146. })
  21147. #else
  21148. #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  21149. uint8_t __s0 = __p0; \
  21150. uint8x8_t __s1 = __p1; \
  21151. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  21152. uint8x8_t __ret; \
  21153. __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
  21154. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21155. __ret; \
  21156. })
  21157. #define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  21158. uint8_t __s0 = __p0; \
  21159. uint8x8_t __s1 = __p1; \
  21160. uint8x8_t __ret; \
  21161. __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
  21162. __ret; \
  21163. })
  21164. #endif
  21165. #ifdef __LITTLE_ENDIAN__
  21166. #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  21167. uint32_t __s0 = __p0; \
  21168. uint32x2_t __s1 = __p1; \
  21169. uint32x2_t __ret; \
  21170. __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
  21171. __ret; \
  21172. })
  21173. #else
  21174. #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  21175. uint32_t __s0 = __p0; \
  21176. uint32x2_t __s1 = __p1; \
  21177. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  21178. uint32x2_t __ret; \
  21179. __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
  21180. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21181. __ret; \
  21182. })
  21183. #define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  21184. uint32_t __s0 = __p0; \
  21185. uint32x2_t __s1 = __p1; \
  21186. uint32x2_t __ret; \
  21187. __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
  21188. __ret; \
  21189. })
  21190. #endif
  21191. #ifdef __LITTLE_ENDIAN__
  21192. #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  21193. uint64_t __s0 = __p0; \
  21194. uint64x1_t __s1 = __p1; \
  21195. uint64x1_t __ret; \
  21196. __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  21197. __ret; \
  21198. })
  21199. #else
  21200. #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  21201. uint64_t __s0 = __p0; \
  21202. uint64x1_t __s1 = __p1; \
  21203. uint64x1_t __ret; \
  21204. __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  21205. __ret; \
  21206. })
  21207. #define __noswap_vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  21208. uint64_t __s0 = __p0; \
  21209. uint64x1_t __s1 = __p1; \
  21210. uint64x1_t __ret; \
  21211. __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  21212. __ret; \
  21213. })
  21214. #endif
  21215. #ifdef __LITTLE_ENDIAN__
  21216. #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  21217. uint16_t __s0 = __p0; \
  21218. uint16x4_t __s1 = __p1; \
  21219. uint16x4_t __ret; \
  21220. __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
  21221. __ret; \
  21222. })
  21223. #else
  21224. #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  21225. uint16_t __s0 = __p0; \
  21226. uint16x4_t __s1 = __p1; \
  21227. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  21228. uint16x4_t __ret; \
  21229. __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
  21230. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21231. __ret; \
  21232. })
  21233. #define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  21234. uint16_t __s0 = __p0; \
  21235. uint16x4_t __s1 = __p1; \
  21236. uint16x4_t __ret; \
  21237. __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
  21238. __ret; \
  21239. })
  21240. #endif
  21241. #ifdef __LITTLE_ENDIAN__
  21242. #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  21243. int8_t __s0 = __p0; \
  21244. int8x8_t __s1 = __p1; \
  21245. int8x8_t __ret; \
  21246. __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
  21247. __ret; \
  21248. })
  21249. #else
  21250. #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  21251. int8_t __s0 = __p0; \
  21252. int8x8_t __s1 = __p1; \
  21253. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  21254. int8x8_t __ret; \
  21255. __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
  21256. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21257. __ret; \
  21258. })
  21259. #define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  21260. int8_t __s0 = __p0; \
  21261. int8x8_t __s1 = __p1; \
  21262. int8x8_t __ret; \
  21263. __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
  21264. __ret; \
  21265. })
  21266. #endif
  21267. #ifdef __LITTLE_ENDIAN__
  21268. #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  21269. float32_t __s0 = __p0; \
  21270. float32x2_t __s1 = __p1; \
  21271. float32x2_t __ret; \
  21272. __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
  21273. __ret; \
  21274. })
  21275. #else
  21276. #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  21277. float32_t __s0 = __p0; \
  21278. float32x2_t __s1 = __p1; \
  21279. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  21280. float32x2_t __ret; \
  21281. __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
  21282. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21283. __ret; \
  21284. })
  21285. #define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  21286. float32_t __s0 = __p0; \
  21287. float32x2_t __s1 = __p1; \
  21288. float32x2_t __ret; \
  21289. __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
  21290. __ret; \
  21291. })
  21292. #endif
  21293. #ifdef __LITTLE_ENDIAN__
  21294. #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  21295. int32_t __s0 = __p0; \
  21296. int32x2_t __s1 = __p1; \
  21297. int32x2_t __ret; \
  21298. __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
  21299. __ret; \
  21300. })
  21301. #else
  21302. #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  21303. int32_t __s0 = __p0; \
  21304. int32x2_t __s1 = __p1; \
  21305. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  21306. int32x2_t __ret; \
  21307. __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
  21308. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21309. __ret; \
  21310. })
  21311. #define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  21312. int32_t __s0 = __p0; \
  21313. int32x2_t __s1 = __p1; \
  21314. int32x2_t __ret; \
  21315. __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
  21316. __ret; \
  21317. })
  21318. #endif
  21319. #ifdef __LITTLE_ENDIAN__
  21320. #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  21321. int64_t __s0 = __p0; \
  21322. int64x1_t __s1 = __p1; \
  21323. int64x1_t __ret; \
  21324. __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  21325. __ret; \
  21326. })
  21327. #else
  21328. #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  21329. int64_t __s0 = __p0; \
  21330. int64x1_t __s1 = __p1; \
  21331. int64x1_t __ret; \
  21332. __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  21333. __ret; \
  21334. })
  21335. #define __noswap_vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  21336. int64_t __s0 = __p0; \
  21337. int64x1_t __s1 = __p1; \
  21338. int64x1_t __ret; \
  21339. __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  21340. __ret; \
  21341. })
  21342. #endif
  21343. #ifdef __LITTLE_ENDIAN__
  21344. #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  21345. int16_t __s0 = __p0; \
  21346. int16x4_t __s1 = __p1; \
  21347. int16x4_t __ret; \
  21348. __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
  21349. __ret; \
  21350. })
  21351. #else
  21352. #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  21353. int16_t __s0 = __p0; \
  21354. int16x4_t __s1 = __p1; \
  21355. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  21356. int16x4_t __ret; \
  21357. __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
  21358. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21359. __ret; \
  21360. })
  21361. #define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  21362. int16_t __s0 = __p0; \
  21363. int16x4_t __s1 = __p1; \
  21364. int16x4_t __ret; \
  21365. __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
  21366. __ret; \
  21367. })
  21368. #endif
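/* The vshlq_ and vshl_ functions below shift each lane of the first
 * operand left by the signed per-lane count taken from the second operand;
 * a negative count shifts that lane right instead.  Illustrative sketch,
 * assuming a NEON-enabled target:
 *
 *   uint8x16_t v = vdupq_n_u8(1);
 *   int8x16_t  n = vdupq_n_s8(3);
 *   uint8x16_t r = vshlq_u8(v, n);   // every lane becomes 1 << 3 == 8
 */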
  21369. #ifdef __LITTLE_ENDIAN__
  21370. __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  21371. uint8x16_t __ret;
  21372. __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  21373. return __ret;
  21374. }
  21375. #else
  21376. __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
  21377. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  21378. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  21379. uint8x16_t __ret;
  21380. __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  21381. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  21382. return __ret;
  21383. }
  21384. #endif
  21385. #ifdef __LITTLE_ENDIAN__
  21386. __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  21387. uint32x4_t __ret;
  21388. __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  21389. return __ret;
  21390. }
  21391. #else
  21392. __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
  21393. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  21394. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  21395. uint32x4_t __ret;
  21396. __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  21397. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  21398. return __ret;
  21399. }
  21400. #endif
  21401. #ifdef __LITTLE_ENDIAN__
  21402. __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  21403. uint64x2_t __ret;
  21404. __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  21405. return __ret;
  21406. }
  21407. #else
  21408. __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
  21409. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  21410. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  21411. uint64x2_t __ret;
  21412. __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  21413. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  21414. return __ret;
  21415. }
  21416. #endif
  21417. #ifdef __LITTLE_ENDIAN__
  21418. __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  21419. uint16x8_t __ret;
  21420. __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  21421. return __ret;
  21422. }
  21423. #else
  21424. __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
  21425. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  21426. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  21427. uint16x8_t __ret;
  21428. __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  21429. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  21430. return __ret;
  21431. }
  21432. #endif
  21433. #ifdef __LITTLE_ENDIAN__
  21434. __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  21435. int8x16_t __ret;
  21436. __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  21437. return __ret;
  21438. }
  21439. #else
  21440. __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
  21441. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  21442. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  21443. int8x16_t __ret;
  21444. __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  21445. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  21446. return __ret;
  21447. }
  21448. #endif
  21449. #ifdef __LITTLE_ENDIAN__
  21450. __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  21451. int32x4_t __ret;
  21452. __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  21453. return __ret;
  21454. }
  21455. #else
  21456. __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
  21457. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  21458. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  21459. int32x4_t __ret;
  21460. __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  21461. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  21462. return __ret;
  21463. }
  21464. #endif
  21465. #ifdef __LITTLE_ENDIAN__
  21466. __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  21467. int64x2_t __ret;
  21468. __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  21469. return __ret;
  21470. }
  21471. #else
  21472. __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
  21473. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  21474. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  21475. int64x2_t __ret;
  21476. __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  21477. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  21478. return __ret;
  21479. }
  21480. #endif
  21481. #ifdef __LITTLE_ENDIAN__
  21482. __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  21483. int16x8_t __ret;
  21484. __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  21485. return __ret;
  21486. }
  21487. #else
  21488. __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
  21489. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  21490. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  21491. int16x8_t __ret;
  21492. __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  21493. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  21494. return __ret;
  21495. }
  21496. #endif
  21497. #ifdef __LITTLE_ENDIAN__
  21498. __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  21499. uint8x8_t __ret;
  21500. __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  21501. return __ret;
  21502. }
  21503. #else
  21504. __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
  21505. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  21506. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  21507. uint8x8_t __ret;
  21508. __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  21509. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  21510. return __ret;
  21511. }
  21512. #endif
  21513. #ifdef __LITTLE_ENDIAN__
  21514. __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  21515. uint32x2_t __ret;
  21516. __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  21517. return __ret;
  21518. }
  21519. #else
  21520. __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
  21521. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  21522. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  21523. uint32x2_t __ret;
  21524. __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  21525. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  21526. return __ret;
  21527. }
  21528. #endif
  21529. #ifdef __LITTLE_ENDIAN__
  21530. __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  21531. uint64x1_t __ret;
  21532. __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  21533. return __ret;
  21534. }
  21535. #else
  21536. __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
  21537. uint64x1_t __ret;
  21538. __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  21539. return __ret;
  21540. }
  21541. #endif
  21542. #ifdef __LITTLE_ENDIAN__
  21543. __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  21544. uint16x4_t __ret;
  21545. __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  21546. return __ret;
  21547. }
  21548. #else
  21549. __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
  21550. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  21551. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  21552. uint16x4_t __ret;
  21553. __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  21554. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  21555. return __ret;
  21556. }
  21557. #endif
  21558. #ifdef __LITTLE_ENDIAN__
  21559. __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
  21560. int8x8_t __ret;
  21561. __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  21562. return __ret;
  21563. }
  21564. #else
  21565. __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
  21566. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  21567. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  21568. int8x8_t __ret;
  21569. __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  21570. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  21571. return __ret;
  21572. }
  21573. #endif
  21574. #ifdef __LITTLE_ENDIAN__
  21575. __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
  21576. int32x2_t __ret;
  21577. __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  21578. return __ret;
  21579. }
  21580. #else
  21581. __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
  21582. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  21583. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  21584. int32x2_t __ret;
  21585. __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  21586. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  21587. return __ret;
  21588. }
  21589. #endif
  21590. #ifdef __LITTLE_ENDIAN__
  21591. __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
  21592. int64x1_t __ret;
  21593. __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  21594. return __ret;
  21595. }
  21596. #else
  21597. __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
  21598. int64x1_t __ret;
  21599. __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  21600. return __ret;
  21601. }
  21602. #endif
  21603. #ifdef __LITTLE_ENDIAN__
  21604. __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
  21605. int16x4_t __ret;
  21606. __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  21607. return __ret;
  21608. }
  21609. #else
  21610. __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
  21611. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  21612. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  21613. int16x4_t __ret;
  21614. __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  21615. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  21616. return __ret;
  21617. }
  21618. #endif
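/* The vshlq_n_ and vshl_n_ macros below shift every lane left by an
 * immediate, which must be a compile-time constant in the range 0 to
 * (element width - 1).  Illustrative sketch, assuming a NEON-enabled
 * target:
 *
 *   int32x4_t v = vdupq_n_s32(5);
 *   int32x4_t r = vshlq_n_s32(v, 4);  // every lane becomes 5 << 4 == 80
 */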
  21619. #ifdef __LITTLE_ENDIAN__
  21620. #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
  21621. uint8x16_t __s0 = __p0; \
  21622. uint8x16_t __ret; \
  21623. __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
  21624. __ret; \
  21625. })
  21626. #else
  21627. #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
  21628. uint8x16_t __s0 = __p0; \
  21629. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  21630. uint8x16_t __ret; \
  21631. __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
  21632. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  21633. __ret; \
  21634. })
  21635. #endif
  21636. #ifdef __LITTLE_ENDIAN__
  21637. #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
  21638. uint32x4_t __s0 = __p0; \
  21639. uint32x4_t __ret; \
  21640. __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
  21641. __ret; \
  21642. })
  21643. #else
  21644. #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
  21645. uint32x4_t __s0 = __p0; \
  21646. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  21647. uint32x4_t __ret; \
  21648. __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
  21649. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21650. __ret; \
  21651. })
  21652. #endif
  21653. #ifdef __LITTLE_ENDIAN__
  21654. #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
  21655. uint64x2_t __s0 = __p0; \
  21656. uint64x2_t __ret; \
  21657. __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
  21658. __ret; \
  21659. })
  21660. #else
  21661. #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
  21662. uint64x2_t __s0 = __p0; \
  21663. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  21664. uint64x2_t __ret; \
  21665. __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
  21666. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21667. __ret; \
  21668. })
  21669. #endif
  21670. #ifdef __LITTLE_ENDIAN__
  21671. #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
  21672. uint16x8_t __s0 = __p0; \
  21673. uint16x8_t __ret; \
  21674. __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
  21675. __ret; \
  21676. })
  21677. #else
  21678. #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
  21679. uint16x8_t __s0 = __p0; \
  21680. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  21681. uint16x8_t __ret; \
  21682. __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
  21683. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21684. __ret; \
  21685. })
  21686. #endif
  21687. #ifdef __LITTLE_ENDIAN__
  21688. #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
  21689. int8x16_t __s0 = __p0; \
  21690. int8x16_t __ret; \
  21691. __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
  21692. __ret; \
  21693. })
  21694. #else
  21695. #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
  21696. int8x16_t __s0 = __p0; \
  21697. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  21698. int8x16_t __ret; \
  21699. __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
  21700. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  21701. __ret; \
  21702. })
  21703. #endif
  21704. #ifdef __LITTLE_ENDIAN__
  21705. #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
  21706. int32x4_t __s0 = __p0; \
  21707. int32x4_t __ret; \
  21708. __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
  21709. __ret; \
  21710. })
  21711. #else
  21712. #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
  21713. int32x4_t __s0 = __p0; \
  21714. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  21715. int32x4_t __ret; \
  21716. __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
  21717. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21718. __ret; \
  21719. })
  21720. #endif
  21721. #ifdef __LITTLE_ENDIAN__
  21722. #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
  21723. int64x2_t __s0 = __p0; \
  21724. int64x2_t __ret; \
  21725. __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
  21726. __ret; \
  21727. })
  21728. #else
  21729. #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
  21730. int64x2_t __s0 = __p0; \
  21731. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  21732. int64x2_t __ret; \
  21733. __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
  21734. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21735. __ret; \
  21736. })
  21737. #endif
  21738. #ifdef __LITTLE_ENDIAN__
  21739. #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
  21740. int16x8_t __s0 = __p0; \
  21741. int16x8_t __ret; \
  21742. __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
  21743. __ret; \
  21744. })
  21745. #else
  21746. #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
  21747. int16x8_t __s0 = __p0; \
  21748. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  21749. int16x8_t __ret; \
  21750. __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
  21751. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21752. __ret; \
  21753. })
  21754. #endif
  21755. #ifdef __LITTLE_ENDIAN__
  21756. #define vshl_n_u8(__p0, __p1) __extension__ ({ \
  21757. uint8x8_t __s0 = __p0; \
  21758. uint8x8_t __ret; \
  21759. __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
  21760. __ret; \
  21761. })
  21762. #else
  21763. #define vshl_n_u8(__p0, __p1) __extension__ ({ \
  21764. uint8x8_t __s0 = __p0; \
  21765. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  21766. uint8x8_t __ret; \
  21767. __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
  21768. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21769. __ret; \
  21770. })
  21771. #endif
  21772. #ifdef __LITTLE_ENDIAN__
  21773. #define vshl_n_u32(__p0, __p1) __extension__ ({ \
  21774. uint32x2_t __s0 = __p0; \
  21775. uint32x2_t __ret; \
  21776. __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
  21777. __ret; \
  21778. })
  21779. #else
  21780. #define vshl_n_u32(__p0, __p1) __extension__ ({ \
  21781. uint32x2_t __s0 = __p0; \
  21782. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  21783. uint32x2_t __ret; \
  21784. __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
  21785. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21786. __ret; \
  21787. })
  21788. #endif
  21789. #ifdef __LITTLE_ENDIAN__
  21790. #define vshl_n_u64(__p0, __p1) __extension__ ({ \
  21791. uint64x1_t __s0 = __p0; \
  21792. uint64x1_t __ret; \
  21793. __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
  21794. __ret; \
  21795. })
  21796. #else
  21797. #define vshl_n_u64(__p0, __p1) __extension__ ({ \
  21798. uint64x1_t __s0 = __p0; \
  21799. uint64x1_t __ret; \
  21800. __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
  21801. __ret; \
  21802. })
  21803. #endif
  21804. #ifdef __LITTLE_ENDIAN__
  21805. #define vshl_n_u16(__p0, __p1) __extension__ ({ \
  21806. uint16x4_t __s0 = __p0; \
  21807. uint16x4_t __ret; \
  21808. __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
  21809. __ret; \
  21810. })
  21811. #else
  21812. #define vshl_n_u16(__p0, __p1) __extension__ ({ \
  21813. uint16x4_t __s0 = __p0; \
  21814. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  21815. uint16x4_t __ret; \
  21816. __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
  21817. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21818. __ret; \
  21819. })
  21820. #endif
  21821. #ifdef __LITTLE_ENDIAN__
  21822. #define vshl_n_s8(__p0, __p1) __extension__ ({ \
  21823. int8x8_t __s0 = __p0; \
  21824. int8x8_t __ret; \
  21825. __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
  21826. __ret; \
  21827. })
  21828. #else
  21829. #define vshl_n_s8(__p0, __p1) __extension__ ({ \
  21830. int8x8_t __s0 = __p0; \
  21831. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  21832. int8x8_t __ret; \
  21833. __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
  21834. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21835. __ret; \
  21836. })
  21837. #endif
  21838. #ifdef __LITTLE_ENDIAN__
  21839. #define vshl_n_s32(__p0, __p1) __extension__ ({ \
  21840. int32x2_t __s0 = __p0; \
  21841. int32x2_t __ret; \
  21842. __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
  21843. __ret; \
  21844. })
  21845. #else
  21846. #define vshl_n_s32(__p0, __p1) __extension__ ({ \
  21847. int32x2_t __s0 = __p0; \
  21848. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  21849. int32x2_t __ret; \
  21850. __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
  21851. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21852. __ret; \
  21853. })
  21854. #endif
  21855. #ifdef __LITTLE_ENDIAN__
  21856. #define vshl_n_s64(__p0, __p1) __extension__ ({ \
  21857. int64x1_t __s0 = __p0; \
  21858. int64x1_t __ret; \
  21859. __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
  21860. __ret; \
  21861. })
  21862. #else
  21863. #define vshl_n_s64(__p0, __p1) __extension__ ({ \
  21864. int64x1_t __s0 = __p0; \
  21865. int64x1_t __ret; \
  21866. __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
  21867. __ret; \
  21868. })
  21869. #endif
  21870. #ifdef __LITTLE_ENDIAN__
  21871. #define vshl_n_s16(__p0, __p1) __extension__ ({ \
  21872. int16x4_t __s0 = __p0; \
  21873. int16x4_t __ret; \
  21874. __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
  21875. __ret; \
  21876. })
  21877. #else
  21878. #define vshl_n_s16(__p0, __p1) __extension__ ({ \
  21879. int16x4_t __s0 = __p0; \
  21880. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  21881. int16x4_t __ret; \
  21882. __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
  21883. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21884. __ret; \
  21885. })
  21886. #endif
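/* The vshll_n_ macros below widen each lane to twice its width and then
 * shift it left by a constant immediate.  Illustrative sketch, assuming a
 * NEON-enabled target:
 *
 *   uint8x8_t  v = vdup_n_u8(3);
 *   uint16x8_t r = vshll_n_u8(v, 2); // each uint8 lane widens to uint16; 3 << 2 == 12
 */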
  21887. #ifdef __LITTLE_ENDIAN__
  21888. #define vshll_n_u8(__p0, __p1) __extension__ ({ \
  21889. uint8x8_t __s0 = __p0; \
  21890. uint16x8_t __ret; \
  21891. __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
  21892. __ret; \
  21893. })
  21894. #else
  21895. #define vshll_n_u8(__p0, __p1) __extension__ ({ \
  21896. uint8x8_t __s0 = __p0; \
  21897. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  21898. uint16x8_t __ret; \
  21899. __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
  21900. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21901. __ret; \
  21902. })
  21903. #define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
  21904. uint8x8_t __s0 = __p0; \
  21905. uint16x8_t __ret; \
  21906. __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
  21907. __ret; \
  21908. })
  21909. #endif
  21910. #ifdef __LITTLE_ENDIAN__
  21911. #define vshll_n_u32(__p0, __p1) __extension__ ({ \
  21912. uint32x2_t __s0 = __p0; \
  21913. uint64x2_t __ret; \
  21914. __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
  21915. __ret; \
  21916. })
  21917. #else
  21918. #define vshll_n_u32(__p0, __p1) __extension__ ({ \
  21919. uint32x2_t __s0 = __p0; \
  21920. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  21921. uint64x2_t __ret; \
  21922. __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
  21923. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21924. __ret; \
  21925. })
  21926. #define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
  21927. uint32x2_t __s0 = __p0; \
  21928. uint64x2_t __ret; \
  21929. __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
  21930. __ret; \
  21931. })
  21932. #endif
  21933. #ifdef __LITTLE_ENDIAN__
  21934. #define vshll_n_u16(__p0, __p1) __extension__ ({ \
  21935. uint16x4_t __s0 = __p0; \
  21936. uint32x4_t __ret; \
  21937. __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
  21938. __ret; \
  21939. })
  21940. #else
  21941. #define vshll_n_u16(__p0, __p1) __extension__ ({ \
  21942. uint16x4_t __s0 = __p0; \
  21943. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  21944. uint32x4_t __ret; \
  21945. __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
  21946. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  21947. __ret; \
  21948. })
  21949. #define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
  21950. uint16x4_t __s0 = __p0; \
  21951. uint32x4_t __ret; \
  21952. __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
  21953. __ret; \
  21954. })
  21955. #endif
  21956. #ifdef __LITTLE_ENDIAN__
  21957. #define vshll_n_s8(__p0, __p1) __extension__ ({ \
  21958. int8x8_t __s0 = __p0; \
  21959. int16x8_t __ret; \
  21960. __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
  21961. __ret; \
  21962. })
  21963. #else
  21964. #define vshll_n_s8(__p0, __p1) __extension__ ({ \
  21965. int8x8_t __s0 = __p0; \
  21966. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  21967. int16x8_t __ret; \
  21968. __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
  21969. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  21970. __ret; \
  21971. })
  21972. #define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
  21973. int8x8_t __s0 = __p0; \
  21974. int16x8_t __ret; \
  21975. __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
  21976. __ret; \
  21977. })
  21978. #endif
  21979. #ifdef __LITTLE_ENDIAN__
  21980. #define vshll_n_s32(__p0, __p1) __extension__ ({ \
  21981. int32x2_t __s0 = __p0; \
  21982. int64x2_t __ret; \
  21983. __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
  21984. __ret; \
  21985. })
  21986. #else
  21987. #define vshll_n_s32(__p0, __p1) __extension__ ({ \
  21988. int32x2_t __s0 = __p0; \
  21989. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  21990. int64x2_t __ret; \
  21991. __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
  21992. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  21993. __ret; \
  21994. })
  21995. #define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
  21996. int32x2_t __s0 = __p0; \
  21997. int64x2_t __ret; \
  21998. __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
  21999. __ret; \
  22000. })
  22001. #endif
  22002. #ifdef __LITTLE_ENDIAN__
  22003. #define vshll_n_s16(__p0, __p1) __extension__ ({ \
  22004. int16x4_t __s0 = __p0; \
  22005. int32x4_t __ret; \
  22006. __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
  22007. __ret; \
  22008. })
  22009. #else
  22010. #define vshll_n_s16(__p0, __p1) __extension__ ({ \
  22011. int16x4_t __s0 = __p0; \
  22012. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22013. int32x4_t __ret; \
  22014. __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
  22015. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22016. __ret; \
  22017. })
  22018. #define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
  22019. int16x4_t __s0 = __p0; \
  22020. int32x4_t __ret; \
  22021. __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
  22022. __ret; \
  22023. })
  22024. #endif
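/* The vshrq_n_ and vshr_n_ macros below shift every lane right by an
 * immediate in the range 1 to the element width; the shift is logical for
 * unsigned element types and arithmetic for signed ones.  Illustrative
 * sketch, assuming a NEON-enabled target:
 *
 *   uint32x4_t v = vdupq_n_u32(0x100);
 *   uint32x4_t r = vshrq_n_u32(v, 8);  // every lane becomes 0x1
 */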
  22025. #ifdef __LITTLE_ENDIAN__
  22026. #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
  22027. uint8x16_t __s0 = __p0; \
  22028. uint8x16_t __ret; \
  22029. __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
  22030. __ret; \
  22031. })
  22032. #else
  22033. #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
  22034. uint8x16_t __s0 = __p0; \
  22035. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22036. uint8x16_t __ret; \
  22037. __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
  22038. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22039. __ret; \
  22040. })
  22041. #endif
  22042. #ifdef __LITTLE_ENDIAN__
  22043. #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
  22044. uint32x4_t __s0 = __p0; \
  22045. uint32x4_t __ret; \
  22046. __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
  22047. __ret; \
  22048. })
  22049. #else
  22050. #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
  22051. uint32x4_t __s0 = __p0; \
  22052. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22053. uint32x4_t __ret; \
  22054. __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
  22055. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22056. __ret; \
  22057. })
  22058. #endif
  22059. #ifdef __LITTLE_ENDIAN__
  22060. #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
  22061. uint64x2_t __s0 = __p0; \
  22062. uint64x2_t __ret; \
  22063. __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
  22064. __ret; \
  22065. })
  22066. #else
  22067. #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
  22068. uint64x2_t __s0 = __p0; \
  22069. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22070. uint64x2_t __ret; \
  22071. __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
  22072. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22073. __ret; \
  22074. })
  22075. #endif
  22076. #ifdef __LITTLE_ENDIAN__
  22077. #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
  22078. uint16x8_t __s0 = __p0; \
  22079. uint16x8_t __ret; \
  22080. __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
  22081. __ret; \
  22082. })
  22083. #else
  22084. #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
  22085. uint16x8_t __s0 = __p0; \
  22086. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22087. uint16x8_t __ret; \
  22088. __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
  22089. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22090. __ret; \
  22091. })
  22092. #endif
  22093. #ifdef __LITTLE_ENDIAN__
  22094. #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
  22095. int8x16_t __s0 = __p0; \
  22096. int8x16_t __ret; \
  22097. __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
  22098. __ret; \
  22099. })
  22100. #else
  22101. #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
  22102. int8x16_t __s0 = __p0; \
  22103. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22104. int8x16_t __ret; \
  22105. __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
  22106. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22107. __ret; \
  22108. })
  22109. #endif
  22110. #ifdef __LITTLE_ENDIAN__
  22111. #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
  22112. int32x4_t __s0 = __p0; \
  22113. int32x4_t __ret; \
  22114. __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
  22115. __ret; \
  22116. })
  22117. #else
  22118. #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
  22119. int32x4_t __s0 = __p0; \
  22120. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22121. int32x4_t __ret; \
  22122. __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
  22123. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22124. __ret; \
  22125. })
  22126. #endif
  22127. #ifdef __LITTLE_ENDIAN__
  22128. #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
  22129. int64x2_t __s0 = __p0; \
  22130. int64x2_t __ret; \
  22131. __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
  22132. __ret; \
  22133. })
  22134. #else
  22135. #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
  22136. int64x2_t __s0 = __p0; \
  22137. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22138. int64x2_t __ret; \
  22139. __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
  22140. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22141. __ret; \
  22142. })
  22143. #endif
  22144. #ifdef __LITTLE_ENDIAN__
  22145. #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
  22146. int16x8_t __s0 = __p0; \
  22147. int16x8_t __ret; \
  22148. __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
  22149. __ret; \
  22150. })
  22151. #else
  22152. #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
  22153. int16x8_t __s0 = __p0; \
  22154. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22155. int16x8_t __ret; \
  22156. __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
  22157. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22158. __ret; \
  22159. })
  22160. #endif
  22161. #ifdef __LITTLE_ENDIAN__
  22162. #define vshr_n_u8(__p0, __p1) __extension__ ({ \
  22163. uint8x8_t __s0 = __p0; \
  22164. uint8x8_t __ret; \
  22165. __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
  22166. __ret; \
  22167. })
  22168. #else
  22169. #define vshr_n_u8(__p0, __p1) __extension__ ({ \
  22170. uint8x8_t __s0 = __p0; \
  22171. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22172. uint8x8_t __ret; \
  22173. __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
  22174. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22175. __ret; \
  22176. })
  22177. #endif
  22178. #ifdef __LITTLE_ENDIAN__
  22179. #define vshr_n_u32(__p0, __p1) __extension__ ({ \
  22180. uint32x2_t __s0 = __p0; \
  22181. uint32x2_t __ret; \
  22182. __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
  22183. __ret; \
  22184. })
  22185. #else
  22186. #define vshr_n_u32(__p0, __p1) __extension__ ({ \
  22187. uint32x2_t __s0 = __p0; \
  22188. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22189. uint32x2_t __ret; \
  22190. __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
  22191. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22192. __ret; \
  22193. })
  22194. #endif
  22195. #ifdef __LITTLE_ENDIAN__
  22196. #define vshr_n_u64(__p0, __p1) __extension__ ({ \
  22197. uint64x1_t __s0 = __p0; \
  22198. uint64x1_t __ret; \
  22199. __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
  22200. __ret; \
  22201. })
  22202. #else
  22203. #define vshr_n_u64(__p0, __p1) __extension__ ({ \
  22204. uint64x1_t __s0 = __p0; \
  22205. uint64x1_t __ret; \
  22206. __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
  22207. __ret; \
  22208. })
  22209. #endif
  22210. #ifdef __LITTLE_ENDIAN__
  22211. #define vshr_n_u16(__p0, __p1) __extension__ ({ \
  22212. uint16x4_t __s0 = __p0; \
  22213. uint16x4_t __ret; \
  22214. __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
  22215. __ret; \
  22216. })
  22217. #else
  22218. #define vshr_n_u16(__p0, __p1) __extension__ ({ \
  22219. uint16x4_t __s0 = __p0; \
  22220. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22221. uint16x4_t __ret; \
  22222. __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
  22223. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22224. __ret; \
  22225. })
  22226. #endif
  22227. #ifdef __LITTLE_ENDIAN__
  22228. #define vshr_n_s8(__p0, __p1) __extension__ ({ \
  22229. int8x8_t __s0 = __p0; \
  22230. int8x8_t __ret; \
  22231. __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
  22232. __ret; \
  22233. })
  22234. #else
  22235. #define vshr_n_s8(__p0, __p1) __extension__ ({ \
  22236. int8x8_t __s0 = __p0; \
  22237. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22238. int8x8_t __ret; \
  22239. __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
  22240. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22241. __ret; \
  22242. })
  22243. #endif
  22244. #ifdef __LITTLE_ENDIAN__
  22245. #define vshr_n_s32(__p0, __p1) __extension__ ({ \
  22246. int32x2_t __s0 = __p0; \
  22247. int32x2_t __ret; \
  22248. __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
  22249. __ret; \
  22250. })
  22251. #else
  22252. #define vshr_n_s32(__p0, __p1) __extension__ ({ \
  22253. int32x2_t __s0 = __p0; \
  22254. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22255. int32x2_t __ret; \
  22256. __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
  22257. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22258. __ret; \
  22259. })
  22260. #endif
  22261. #ifdef __LITTLE_ENDIAN__
  22262. #define vshr_n_s64(__p0, __p1) __extension__ ({ \
  22263. int64x1_t __s0 = __p0; \
  22264. int64x1_t __ret; \
  22265. __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
  22266. __ret; \
  22267. })
  22268. #else
  22269. #define vshr_n_s64(__p0, __p1) __extension__ ({ \
  22270. int64x1_t __s0 = __p0; \
  22271. int64x1_t __ret; \
  22272. __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
  22273. __ret; \
  22274. })
  22275. #endif
  22276. #ifdef __LITTLE_ENDIAN__
  22277. #define vshr_n_s16(__p0, __p1) __extension__ ({ \
  22278. int16x4_t __s0 = __p0; \
  22279. int16x4_t __ret; \
  22280. __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
  22281. __ret; \
  22282. })
  22283. #else
  22284. #define vshr_n_s16(__p0, __p1) __extension__ ({ \
  22285. int16x4_t __s0 = __p0; \
  22286. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22287. int16x4_t __ret; \
  22288. __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
  22289. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22290. __ret; \
  22291. })
  22292. #endif
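/* The vshrn_n_ macros below shift every lane right by a constant and then
 * narrow it to half the width, keeping the low half of each result.
 * Illustrative sketch, assuming a NEON-enabled target:
 *
 *   uint32x4_t v = vdupq_n_u32(0x12345678);
 *   uint16x4_t r = vshrn_n_u32(v, 16);  // every lane becomes 0x1234
 */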
  22293. #ifdef __LITTLE_ENDIAN__
  22294. #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
  22295. uint32x4_t __s0 = __p0; \
  22296. uint16x4_t __ret; \
  22297. __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
  22298. __ret; \
  22299. })
  22300. #else
  22301. #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
  22302. uint32x4_t __s0 = __p0; \
  22303. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22304. uint16x4_t __ret; \
  22305. __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
  22306. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22307. __ret; \
  22308. })
  22309. #define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
  22310. uint32x4_t __s0 = __p0; \
  22311. uint16x4_t __ret; \
  22312. __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
  22313. __ret; \
  22314. })
  22315. #endif
  22316. #ifdef __LITTLE_ENDIAN__
  22317. #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
  22318. uint64x2_t __s0 = __p0; \
  22319. uint32x2_t __ret; \
  22320. __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
  22321. __ret; \
  22322. })
  22323. #else
  22324. #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
  22325. uint64x2_t __s0 = __p0; \
  22326. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22327. uint32x2_t __ret; \
  22328. __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
  22329. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22330. __ret; \
  22331. })
  22332. #define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
  22333. uint64x2_t __s0 = __p0; \
  22334. uint32x2_t __ret; \
  22335. __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
  22336. __ret; \
  22337. })
  22338. #endif
  22339. #ifdef __LITTLE_ENDIAN__
  22340. #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
  22341. uint16x8_t __s0 = __p0; \
  22342. uint8x8_t __ret; \
  22343. __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
  22344. __ret; \
  22345. })
  22346. #else
  22347. #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
  22348. uint16x8_t __s0 = __p0; \
  22349. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22350. uint8x8_t __ret; \
  22351. __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
  22352. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22353. __ret; \
  22354. })
  22355. #define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
  22356. uint16x8_t __s0 = __p0; \
  22357. uint8x8_t __ret; \
  22358. __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
  22359. __ret; \
  22360. })
  22361. #endif
  22362. #ifdef __LITTLE_ENDIAN__
  22363. #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
  22364. int32x4_t __s0 = __p0; \
  22365. int16x4_t __ret; \
  22366. __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
  22367. __ret; \
  22368. })
  22369. #else
  22370. #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
  22371. int32x4_t __s0 = __p0; \
  22372. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22373. int16x4_t __ret; \
  22374. __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
  22375. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22376. __ret; \
  22377. })
  22378. #define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
  22379. int32x4_t __s0 = __p0; \
  22380. int16x4_t __ret; \
  22381. __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
  22382. __ret; \
  22383. })
  22384. #endif
  22385. #ifdef __LITTLE_ENDIAN__
  22386. #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
  22387. int64x2_t __s0 = __p0; \
  22388. int32x2_t __ret; \
  22389. __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
  22390. __ret; \
  22391. })
  22392. #else
  22393. #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
  22394. int64x2_t __s0 = __p0; \
  22395. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22396. int32x2_t __ret; \
  22397. __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
  22398. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22399. __ret; \
  22400. })
  22401. #define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
  22402. int64x2_t __s0 = __p0; \
  22403. int32x2_t __ret; \
  22404. __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
  22405. __ret; \
  22406. })
  22407. #endif
  22408. #ifdef __LITTLE_ENDIAN__
  22409. #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
  22410. int16x8_t __s0 = __p0; \
  22411. int8x8_t __ret; \
  22412. __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
  22413. __ret; \
  22414. })
  22415. #else
  22416. #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
  22417. int16x8_t __s0 = __p0; \
  22418. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22419. int8x8_t __ret; \
  22420. __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
  22421. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22422. __ret; \
  22423. })
  22424. #define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
  22425. int16x8_t __s0 = __p0; \
  22426. int8x8_t __ret; \
  22427. __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
  22428. __ret; \
  22429. })
  22430. #endif
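/* Editor's note: a minimal usage sketch for the narrowing shift vshrn_n_*
 * intrinsics above (shift each wide lane right by the immediate, then keep
 * the low half). The helper name is illustrative only.
 *
 *   #include <arm_neon.h>
 *
 *   // Shift each 32-bit lane right by 16 and narrow to 16 bits,
 *   // i.e. extract the high halfword of every lane.
 *   uint16x4_t high_halves(uint32x4_t v)
 *   {
 *       return vshrn_n_u32(v, 16);
 *   }
 */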
  22431. #ifdef __LITTLE_ENDIAN__
  22432. #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
  22433. poly8x8_t __s0 = __p0; \
  22434. poly8x8_t __s1 = __p1; \
  22435. poly8x8_t __ret; \
  22436. __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
  22437. __ret; \
  22438. })
  22439. #else
  22440. #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
  22441. poly8x8_t __s0 = __p0; \
  22442. poly8x8_t __s1 = __p1; \
  22443. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22444. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22445. poly8x8_t __ret; \
  22446. __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
  22447. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22448. __ret; \
  22449. })
  22450. #endif
  22451. #ifdef __LITTLE_ENDIAN__
  22452. #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
  22453. poly16x4_t __s0 = __p0; \
  22454. poly16x4_t __s1 = __p1; \
  22455. poly16x4_t __ret; \
  22456. __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
  22457. __ret; \
  22458. })
  22459. #else
  22460. #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
  22461. poly16x4_t __s0 = __p0; \
  22462. poly16x4_t __s1 = __p1; \
  22463. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22464. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  22465. poly16x4_t __ret; \
  22466. __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
  22467. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22468. __ret; \
  22469. })
  22470. #endif
  22471. #ifdef __LITTLE_ENDIAN__
  22472. #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
  22473. poly8x16_t __s0 = __p0; \
  22474. poly8x16_t __s1 = __p1; \
  22475. poly8x16_t __ret; \
  22476. __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
  22477. __ret; \
  22478. })
  22479. #else
  22480. #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
  22481. poly8x16_t __s0 = __p0; \
  22482. poly8x16_t __s1 = __p1; \
  22483. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22484. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22485. poly8x16_t __ret; \
  22486. __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
  22487. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22488. __ret; \
  22489. })
  22490. #endif
  22491. #ifdef __LITTLE_ENDIAN__
  22492. #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
  22493. poly16x8_t __s0 = __p0; \
  22494. poly16x8_t __s1 = __p1; \
  22495. poly16x8_t __ret; \
  22496. __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
  22497. __ret; \
  22498. })
  22499. #else
  22500. #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
  22501. poly16x8_t __s0 = __p0; \
  22502. poly16x8_t __s1 = __p1; \
  22503. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22504. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22505. poly16x8_t __ret; \
  22506. __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
  22507. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22508. __ret; \
  22509. })
  22510. #endif
  22511. #ifdef __LITTLE_ENDIAN__
  22512. #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
  22513. uint8x16_t __s0 = __p0; \
  22514. uint8x16_t __s1 = __p1; \
  22515. uint8x16_t __ret; \
  22516. __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
  22517. __ret; \
  22518. })
  22519. #else
  22520. #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
  22521. uint8x16_t __s0 = __p0; \
  22522. uint8x16_t __s1 = __p1; \
  22523. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22524. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22525. uint8x16_t __ret; \
  22526. __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
  22527. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22528. __ret; \
  22529. })
  22530. #endif
  22531. #ifdef __LITTLE_ENDIAN__
  22532. #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
  22533. uint32x4_t __s0 = __p0; \
  22534. uint32x4_t __s1 = __p1; \
  22535. uint32x4_t __ret; \
  22536. __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
  22537. __ret; \
  22538. })
  22539. #else
  22540. #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
  22541. uint32x4_t __s0 = __p0; \
  22542. uint32x4_t __s1 = __p1; \
  22543. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22544. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  22545. uint32x4_t __ret; \
  22546. __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
  22547. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22548. __ret; \
  22549. })
  22550. #endif
  22551. #ifdef __LITTLE_ENDIAN__
  22552. #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
  22553. uint64x2_t __s0 = __p0; \
  22554. uint64x2_t __s1 = __p1; \
  22555. uint64x2_t __ret; \
  22556. __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
  22557. __ret; \
  22558. })
  22559. #else
  22560. #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
  22561. uint64x2_t __s0 = __p0; \
  22562. uint64x2_t __s1 = __p1; \
  22563. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22564. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  22565. uint64x2_t __ret; \
  22566. __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
  22567. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22568. __ret; \
  22569. })
  22570. #endif
  22571. #ifdef __LITTLE_ENDIAN__
  22572. #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
  22573. uint16x8_t __s0 = __p0; \
  22574. uint16x8_t __s1 = __p1; \
  22575. uint16x8_t __ret; \
  22576. __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
  22577. __ret; \
  22578. })
  22579. #else
  22580. #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
  22581. uint16x8_t __s0 = __p0; \
  22582. uint16x8_t __s1 = __p1; \
  22583. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22584. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22585. uint16x8_t __ret; \
  22586. __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
  22587. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22588. __ret; \
  22589. })
  22590. #endif
  22591. #ifdef __LITTLE_ENDIAN__
  22592. #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
  22593. int8x16_t __s0 = __p0; \
  22594. int8x16_t __s1 = __p1; \
  22595. int8x16_t __ret; \
  22596. __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
  22597. __ret; \
  22598. })
  22599. #else
  22600. #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
  22601. int8x16_t __s0 = __p0; \
  22602. int8x16_t __s1 = __p1; \
  22603. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22604. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22605. int8x16_t __ret; \
  22606. __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
  22607. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22608. __ret; \
  22609. })
  22610. #endif
  22611. #ifdef __LITTLE_ENDIAN__
  22612. #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
  22613. int32x4_t __s0 = __p0; \
  22614. int32x4_t __s1 = __p1; \
  22615. int32x4_t __ret; \
  22616. __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
  22617. __ret; \
  22618. })
  22619. #else
  22620. #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
  22621. int32x4_t __s0 = __p0; \
  22622. int32x4_t __s1 = __p1; \
  22623. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22624. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  22625. int32x4_t __ret; \
  22626. __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
  22627. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22628. __ret; \
  22629. })
  22630. #endif
  22631. #ifdef __LITTLE_ENDIAN__
  22632. #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
  22633. int64x2_t __s0 = __p0; \
  22634. int64x2_t __s1 = __p1; \
  22635. int64x2_t __ret; \
  22636. __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
  22637. __ret; \
  22638. })
  22639. #else
  22640. #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
  22641. int64x2_t __s0 = __p0; \
  22642. int64x2_t __s1 = __p1; \
  22643. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22644. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  22645. int64x2_t __ret; \
  22646. __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
  22647. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22648. __ret; \
  22649. })
  22650. #endif
  22651. #ifdef __LITTLE_ENDIAN__
  22652. #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
  22653. int16x8_t __s0 = __p0; \
  22654. int16x8_t __s1 = __p1; \
  22655. int16x8_t __ret; \
  22656. __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
  22657. __ret; \
  22658. })
  22659. #else
  22660. #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
  22661. int16x8_t __s0 = __p0; \
  22662. int16x8_t __s1 = __p1; \
  22663. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22664. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22665. int16x8_t __ret; \
  22666. __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
  22667. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22668. __ret; \
  22669. })
  22670. #endif
  22671. #ifdef __LITTLE_ENDIAN__
  22672. #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
  22673. uint8x8_t __s0 = __p0; \
  22674. uint8x8_t __s1 = __p1; \
  22675. uint8x8_t __ret; \
  22676. __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
  22677. __ret; \
  22678. })
  22679. #else
  22680. #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
  22681. uint8x8_t __s0 = __p0; \
  22682. uint8x8_t __s1 = __p1; \
  22683. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22684. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22685. uint8x8_t __ret; \
  22686. __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
  22687. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22688. __ret; \
  22689. })
  22690. #endif
  22691. #ifdef __LITTLE_ENDIAN__
  22692. #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
  22693. uint32x2_t __s0 = __p0; \
  22694. uint32x2_t __s1 = __p1; \
  22695. uint32x2_t __ret; \
  22696. __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
  22697. __ret; \
  22698. })
  22699. #else
  22700. #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
  22701. uint32x2_t __s0 = __p0; \
  22702. uint32x2_t __s1 = __p1; \
  22703. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22704. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  22705. uint32x2_t __ret; \
  22706. __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
  22707. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22708. __ret; \
  22709. })
  22710. #endif
  22711. #ifdef __LITTLE_ENDIAN__
  22712. #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
  22713. uint64x1_t __s0 = __p0; \
  22714. uint64x1_t __s1 = __p1; \
  22715. uint64x1_t __ret; \
  22716. __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  22717. __ret; \
  22718. })
  22719. #else
  22720. #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
  22721. uint64x1_t __s0 = __p0; \
  22722. uint64x1_t __s1 = __p1; \
  22723. uint64x1_t __ret; \
  22724. __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  22725. __ret; \
  22726. })
  22727. #endif
  22728. #ifdef __LITTLE_ENDIAN__
  22729. #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
  22730. uint16x4_t __s0 = __p0; \
  22731. uint16x4_t __s1 = __p1; \
  22732. uint16x4_t __ret; \
  22733. __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
  22734. __ret; \
  22735. })
  22736. #else
  22737. #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
  22738. uint16x4_t __s0 = __p0; \
  22739. uint16x4_t __s1 = __p1; \
  22740. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22741. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  22742. uint16x4_t __ret; \
  22743. __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
  22744. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22745. __ret; \
  22746. })
  22747. #endif
  22748. #ifdef __LITTLE_ENDIAN__
  22749. #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
  22750. int8x8_t __s0 = __p0; \
  22751. int8x8_t __s1 = __p1; \
  22752. int8x8_t __ret; \
  22753. __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
  22754. __ret; \
  22755. })
  22756. #else
  22757. #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
  22758. int8x8_t __s0 = __p0; \
  22759. int8x8_t __s1 = __p1; \
  22760. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22761. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22762. int8x8_t __ret; \
  22763. __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
  22764. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22765. __ret; \
  22766. })
  22767. #endif
  22768. #ifdef __LITTLE_ENDIAN__
  22769. #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
  22770. int32x2_t __s0 = __p0; \
  22771. int32x2_t __s1 = __p1; \
  22772. int32x2_t __ret; \
  22773. __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
  22774. __ret; \
  22775. })
  22776. #else
  22777. #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
  22778. int32x2_t __s0 = __p0; \
  22779. int32x2_t __s1 = __p1; \
  22780. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22781. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  22782. int32x2_t __ret; \
  22783. __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
  22784. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22785. __ret; \
  22786. })
  22787. #endif
  22788. #ifdef __LITTLE_ENDIAN__
  22789. #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
  22790. int64x1_t __s0 = __p0; \
  22791. int64x1_t __s1 = __p1; \
  22792. int64x1_t __ret; \
  22793. __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  22794. __ret; \
  22795. })
  22796. #else
  22797. #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
  22798. int64x1_t __s0 = __p0; \
  22799. int64x1_t __s1 = __p1; \
  22800. int64x1_t __ret; \
  22801. __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  22802. __ret; \
  22803. })
  22804. #endif
  22805. #ifdef __LITTLE_ENDIAN__
  22806. #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
  22807. int16x4_t __s0 = __p0; \
  22808. int16x4_t __s1 = __p1; \
  22809. int16x4_t __ret; \
  22810. __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
  22811. __ret; \
  22812. })
  22813. #else
  22814. #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
  22815. int16x4_t __s0 = __p0; \
  22816. int16x4_t __s1 = __p1; \
  22817. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22818. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  22819. int16x4_t __ret; \
  22820. __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
  22821. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22822. __ret; \
  22823. })
  22824. #endif
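/* Editor's note: a minimal usage sketch for the shift-left-and-insert
 * vsli(q)_n_* intrinsics above. VSLI shifts the second operand left by the
 * immediate and copies the low bits of each lane from the first operand.
 * The rotate helper below is illustrative only.
 *
 *   #include <arm_neon.h>
 *
 *   // Rotate each 32-bit lane left by 8: the left-shifted part comes from
 *   // vsliq, the wrapped-around top bits come from vshrq.
 *   uint32x4_t rotl8_u32(uint32x4_t v)
 *   {
 *       return vsliq_n_u32(vshrq_n_u32(v, 24), v, 8);
 *   }
 */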
  22825. #ifdef __LITTLE_ENDIAN__
  22826. #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
  22827. uint8x16_t __s0 = __p0; \
  22828. uint8x16_t __s1 = __p1; \
  22829. uint8x16_t __ret; \
  22830. __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
  22831. __ret; \
  22832. })
  22833. #else
  22834. #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
  22835. uint8x16_t __s0 = __p0; \
  22836. uint8x16_t __s1 = __p1; \
  22837. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22838. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22839. uint8x16_t __ret; \
  22840. __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
  22841. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22842. __ret; \
  22843. })
  22844. #endif
  22845. #ifdef __LITTLE_ENDIAN__
  22846. #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
  22847. uint32x4_t __s0 = __p0; \
  22848. uint32x4_t __s1 = __p1; \
  22849. uint32x4_t __ret; \
  22850. __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
  22851. __ret; \
  22852. })
  22853. #else
  22854. #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
  22855. uint32x4_t __s0 = __p0; \
  22856. uint32x4_t __s1 = __p1; \
  22857. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22858. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  22859. uint32x4_t __ret; \
  22860. __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
  22861. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22862. __ret; \
  22863. })
  22864. #endif
  22865. #ifdef __LITTLE_ENDIAN__
  22866. #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
  22867. uint64x2_t __s0 = __p0; \
  22868. uint64x2_t __s1 = __p1; \
  22869. uint64x2_t __ret; \
  22870. __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
  22871. __ret; \
  22872. })
  22873. #else
  22874. #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
  22875. uint64x2_t __s0 = __p0; \
  22876. uint64x2_t __s1 = __p1; \
  22877. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22878. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  22879. uint64x2_t __ret; \
  22880. __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
  22881. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22882. __ret; \
  22883. })
  22884. #endif
  22885. #ifdef __LITTLE_ENDIAN__
  22886. #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
  22887. uint16x8_t __s0 = __p0; \
  22888. uint16x8_t __s1 = __p1; \
  22889. uint16x8_t __ret; \
  22890. __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
  22891. __ret; \
  22892. })
  22893. #else
  22894. #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
  22895. uint16x8_t __s0 = __p0; \
  22896. uint16x8_t __s1 = __p1; \
  22897. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22898. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22899. uint16x8_t __ret; \
  22900. __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
  22901. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22902. __ret; \
  22903. })
  22904. #endif
  22905. #ifdef __LITTLE_ENDIAN__
  22906. #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
  22907. int8x16_t __s0 = __p0; \
  22908. int8x16_t __s1 = __p1; \
  22909. int8x16_t __ret; \
  22910. __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
  22911. __ret; \
  22912. })
  22913. #else
  22914. #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
  22915. int8x16_t __s0 = __p0; \
  22916. int8x16_t __s1 = __p1; \
  22917. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22918. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22919. int8x16_t __ret; \
  22920. __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
  22921. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  22922. __ret; \
  22923. })
  22924. #endif
  22925. #ifdef __LITTLE_ENDIAN__
  22926. #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
  22927. int32x4_t __s0 = __p0; \
  22928. int32x4_t __s1 = __p1; \
  22929. int32x4_t __ret; \
  22930. __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
  22931. __ret; \
  22932. })
  22933. #else
  22934. #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
  22935. int32x4_t __s0 = __p0; \
  22936. int32x4_t __s1 = __p1; \
  22937. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  22938. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  22939. int32x4_t __ret; \
  22940. __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
  22941. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  22942. __ret; \
  22943. })
  22944. #endif
  22945. #ifdef __LITTLE_ENDIAN__
  22946. #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
  22947. int64x2_t __s0 = __p0; \
  22948. int64x2_t __s1 = __p1; \
  22949. int64x2_t __ret; \
  22950. __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
  22951. __ret; \
  22952. })
  22953. #else
  22954. #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
  22955. int64x2_t __s0 = __p0; \
  22956. int64x2_t __s1 = __p1; \
  22957. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  22958. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  22959. int64x2_t __ret; \
  22960. __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
  22961. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  22962. __ret; \
  22963. })
  22964. #endif
  22965. #ifdef __LITTLE_ENDIAN__
  22966. #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
  22967. int16x8_t __s0 = __p0; \
  22968. int16x8_t __s1 = __p1; \
  22969. int16x8_t __ret; \
  22970. __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
  22971. __ret; \
  22972. })
  22973. #else
  22974. #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
  22975. int16x8_t __s0 = __p0; \
  22976. int16x8_t __s1 = __p1; \
  22977. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22978. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22979. int16x8_t __ret; \
  22980. __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
  22981. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  22982. __ret; \
  22983. })
  22984. #endif
  22985. #ifdef __LITTLE_ENDIAN__
  22986. #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
  22987. uint8x8_t __s0 = __p0; \
  22988. uint8x8_t __s1 = __p1; \
  22989. uint8x8_t __ret; \
  22990. __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
  22991. __ret; \
  22992. })
  22993. #else
  22994. #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
  22995. uint8x8_t __s0 = __p0; \
  22996. uint8x8_t __s1 = __p1; \
  22997. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  22998. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  22999. uint8x8_t __ret; \
  23000. __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
  23001. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  23002. __ret; \
  23003. })
  23004. #endif
  23005. #ifdef __LITTLE_ENDIAN__
  23006. #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
  23007. uint32x2_t __s0 = __p0; \
  23008. uint32x2_t __s1 = __p1; \
  23009. uint32x2_t __ret; \
  23010. __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
  23011. __ret; \
  23012. })
  23013. #else
  23014. #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
  23015. uint32x2_t __s0 = __p0; \
  23016. uint32x2_t __s1 = __p1; \
  23017. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  23018. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23019. uint32x2_t __ret; \
  23020. __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
  23021. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  23022. __ret; \
  23023. })
  23024. #endif
  23025. #ifdef __LITTLE_ENDIAN__
  23026. #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
  23027. uint64x1_t __s0 = __p0; \
  23028. uint64x1_t __s1 = __p1; \
  23029. uint64x1_t __ret; \
  23030. __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  23031. __ret; \
  23032. })
  23033. #else
  23034. #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
  23035. uint64x1_t __s0 = __p0; \
  23036. uint64x1_t __s1 = __p1; \
  23037. uint64x1_t __ret; \
  23038. __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  23039. __ret; \
  23040. })
  23041. #endif
  23042. #ifdef __LITTLE_ENDIAN__
  23043. #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
  23044. uint16x4_t __s0 = __p0; \
  23045. uint16x4_t __s1 = __p1; \
  23046. uint16x4_t __ret; \
  23047. __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
  23048. __ret; \
  23049. })
  23050. #else
  23051. #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
  23052. uint16x4_t __s0 = __p0; \
  23053. uint16x4_t __s1 = __p1; \
  23054. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  23055. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23056. uint16x4_t __ret; \
  23057. __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
  23058. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  23059. __ret; \
  23060. })
  23061. #endif
  23062. #ifdef __LITTLE_ENDIAN__
  23063. #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
  23064. int8x8_t __s0 = __p0; \
  23065. int8x8_t __s1 = __p1; \
  23066. int8x8_t __ret; \
  23067. __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
  23068. __ret; \
  23069. })
  23070. #else
  23071. #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
  23072. int8x8_t __s0 = __p0; \
  23073. int8x8_t __s1 = __p1; \
  23074. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  23075. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23076. int8x8_t __ret; \
  23077. __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
  23078. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  23079. __ret; \
  23080. })
  23081. #endif
  23082. #ifdef __LITTLE_ENDIAN__
  23083. #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
  23084. int32x2_t __s0 = __p0; \
  23085. int32x2_t __s1 = __p1; \
  23086. int32x2_t __ret; \
  23087. __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
  23088. __ret; \
  23089. })
  23090. #else
  23091. #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
  23092. int32x2_t __s0 = __p0; \
  23093. int32x2_t __s1 = __p1; \
  23094. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  23095. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23096. int32x2_t __ret; \
  23097. __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
  23098. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  23099. __ret; \
  23100. })
  23101. #endif
  23102. #ifdef __LITTLE_ENDIAN__
  23103. #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
  23104. int64x1_t __s0 = __p0; \
  23105. int64x1_t __s1 = __p1; \
  23106. int64x1_t __ret; \
  23107. __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  23108. __ret; \
  23109. })
  23110. #else
  23111. #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
  23112. int64x1_t __s0 = __p0; \
  23113. int64x1_t __s1 = __p1; \
  23114. int64x1_t __ret; \
  23115. __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  23116. __ret; \
  23117. })
  23118. #endif
  23119. #ifdef __LITTLE_ENDIAN__
  23120. #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
  23121. int16x4_t __s0 = __p0; \
  23122. int16x4_t __s1 = __p1; \
  23123. int16x4_t __ret; \
  23124. __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
  23125. __ret; \
  23126. })
  23127. #else
  23128. #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
  23129. int16x4_t __s0 = __p0; \
  23130. int16x4_t __s1 = __p1; \
  23131. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  23132. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23133. int16x4_t __ret; \
  23134. __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
  23135. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  23136. __ret; \
  23137. })
  23138. #endif
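/* Editor's note: a minimal usage sketch for the shift-right-and-accumulate
 * vsra(q)_n_* intrinsics above (result = first operand + (second >> imm)).
 * The helper name is illustrative only.
 *
 *   #include <arm_neon.h>
 *
 *   // acc += (v >> 4) in every unsigned 16-bit lane.
 *   uint16x8_t accumulate_shifted(uint16x8_t acc, uint16x8_t v)
 *   {
 *       return vsraq_n_u16(acc, v, 4);
 *   }
 */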
  23139. #ifdef __LITTLE_ENDIAN__
  23140. #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
  23141. poly8x8_t __s0 = __p0; \
  23142. poly8x8_t __s1 = __p1; \
  23143. poly8x8_t __ret; \
  23144. __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
  23145. __ret; \
  23146. })
  23147. #else
  23148. #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
  23149. poly8x8_t __s0 = __p0; \
  23150. poly8x8_t __s1 = __p1; \
  23151. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  23152. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23153. poly8x8_t __ret; \
  23154. __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
  23155. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  23156. __ret; \
  23157. })
  23158. #endif
  23159. #ifdef __LITTLE_ENDIAN__
  23160. #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
  23161. poly16x4_t __s0 = __p0; \
  23162. poly16x4_t __s1 = __p1; \
  23163. poly16x4_t __ret; \
  23164. __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
  23165. __ret; \
  23166. })
  23167. #else
  23168. #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
  23169. poly16x4_t __s0 = __p0; \
  23170. poly16x4_t __s1 = __p1; \
  23171. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  23172. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23173. poly16x4_t __ret; \
  23174. __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
  23175. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  23176. __ret; \
  23177. })
  23178. #endif
  23179. #ifdef __LITTLE_ENDIAN__
  23180. #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
  23181. poly8x16_t __s0 = __p0; \
  23182. poly8x16_t __s1 = __p1; \
  23183. poly8x16_t __ret; \
  23184. __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
  23185. __ret; \
  23186. })
  23187. #else
  23188. #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
  23189. poly8x16_t __s0 = __p0; \
  23190. poly8x16_t __s1 = __p1; \
  23191. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23192. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23193. poly8x16_t __ret; \
  23194. __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
  23195. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23196. __ret; \
  23197. })
  23198. #endif
  23199. #ifdef __LITTLE_ENDIAN__
  23200. #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
  23201. poly16x8_t __s0 = __p0; \
  23202. poly16x8_t __s1 = __p1; \
  23203. poly16x8_t __ret; \
  23204. __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
  23205. __ret; \
  23206. })
  23207. #else
  23208. #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
  23209. poly16x8_t __s0 = __p0; \
  23210. poly16x8_t __s1 = __p1; \
  23211. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  23212. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23213. poly16x8_t __ret; \
  23214. __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
  23215. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  23216. __ret; \
  23217. })
  23218. #endif
  23219. #ifdef __LITTLE_ENDIAN__
  23220. #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
  23221. uint8x16_t __s0 = __p0; \
  23222. uint8x16_t __s1 = __p1; \
  23223. uint8x16_t __ret; \
  23224. __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
  23225. __ret; \
  23226. })
  23227. #else
  23228. #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
  23229. uint8x16_t __s0 = __p0; \
  23230. uint8x16_t __s1 = __p1; \
  23231. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23232. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23233. uint8x16_t __ret; \
  23234. __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
  23235. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23236. __ret; \
  23237. })
  23238. #endif
  23239. #ifdef __LITTLE_ENDIAN__
  23240. #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
  23241. uint32x4_t __s0 = __p0; \
  23242. uint32x4_t __s1 = __p1; \
  23243. uint32x4_t __ret; \
  23244. __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
  23245. __ret; \
  23246. })
  23247. #else
  23248. #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
  23249. uint32x4_t __s0 = __p0; \
  23250. uint32x4_t __s1 = __p1; \
  23251. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  23252. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23253. uint32x4_t __ret; \
  23254. __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
  23255. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  23256. __ret; \
  23257. })
  23258. #endif
  23259. #ifdef __LITTLE_ENDIAN__
  23260. #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
  23261. uint64x2_t __s0 = __p0; \
  23262. uint64x2_t __s1 = __p1; \
  23263. uint64x2_t __ret; \
  23264. __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
  23265. __ret; \
  23266. })
  23267. #else
  23268. #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
  23269. uint64x2_t __s0 = __p0; \
  23270. uint64x2_t __s1 = __p1; \
  23271. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  23272. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23273. uint64x2_t __ret; \
  23274. __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
  23275. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  23276. __ret; \
  23277. })
  23278. #endif
  23279. #ifdef __LITTLE_ENDIAN__
  23280. #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
  23281. uint16x8_t __s0 = __p0; \
  23282. uint16x8_t __s1 = __p1; \
  23283. uint16x8_t __ret; \
  23284. __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
  23285. __ret; \
  23286. })
  23287. #else
  23288. #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
  23289. uint16x8_t __s0 = __p0; \
  23290. uint16x8_t __s1 = __p1; \
  23291. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  23292. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23293. uint16x8_t __ret; \
  23294. __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
  23295. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  23296. __ret; \
  23297. })
  23298. #endif
  23299. #ifdef __LITTLE_ENDIAN__
  23300. #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
  23301. int8x16_t __s0 = __p0; \
  23302. int8x16_t __s1 = __p1; \
  23303. int8x16_t __ret; \
  23304. __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
  23305. __ret; \
  23306. })
  23307. #else
  23308. #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
  23309. int8x16_t __s0 = __p0; \
  23310. int8x16_t __s1 = __p1; \
  23311. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23312. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23313. int8x16_t __ret; \
  23314. __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
  23315. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23316. __ret; \
  23317. })
  23318. #endif
  23319. #ifdef __LITTLE_ENDIAN__
  23320. #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
  23321. int32x4_t __s0 = __p0; \
  23322. int32x4_t __s1 = __p1; \
  23323. int32x4_t __ret; \
  23324. __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
  23325. __ret; \
  23326. })
  23327. #else
  23328. #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
  23329. int32x4_t __s0 = __p0; \
  23330. int32x4_t __s1 = __p1; \
  23331. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  23332. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23333. int32x4_t __ret; \
  23334. __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
  23335. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  23336. __ret; \
  23337. })
  23338. #endif
  23339. #ifdef __LITTLE_ENDIAN__
  23340. #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
  23341. int64x2_t __s0 = __p0; \
  23342. int64x2_t __s1 = __p1; \
  23343. int64x2_t __ret; \
  23344. __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
  23345. __ret; \
  23346. })
  23347. #else
  23348. #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
  23349. int64x2_t __s0 = __p0; \
  23350. int64x2_t __s1 = __p1; \
  23351. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  23352. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23353. int64x2_t __ret; \
  23354. __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
  23355. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  23356. __ret; \
  23357. })
  23358. #endif
  23359. #ifdef __LITTLE_ENDIAN__
  23360. #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
  23361. int16x8_t __s0 = __p0; \
  23362. int16x8_t __s1 = __p1; \
  23363. int16x8_t __ret; \
  23364. __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
  23365. __ret; \
  23366. })
  23367. #else
  23368. #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
  23369. int16x8_t __s0 = __p0; \
  23370. int16x8_t __s1 = __p1; \
  23371. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  23372. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23373. int16x8_t __ret; \
  23374. __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
  23375. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  23376. __ret; \
  23377. })
  23378. #endif
  23379. #ifdef __LITTLE_ENDIAN__
  23380. #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
  23381. uint8x8_t __s0 = __p0; \
  23382. uint8x8_t __s1 = __p1; \
  23383. uint8x8_t __ret; \
  23384. __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
  23385. __ret; \
  23386. })
  23387. #else
  23388. #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
  23389. uint8x8_t __s0 = __p0; \
  23390. uint8x8_t __s1 = __p1; \
  23391. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  23392. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23393. uint8x8_t __ret; \
  23394. __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
  23395. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  23396. __ret; \
  23397. })
  23398. #endif
  23399. #ifdef __LITTLE_ENDIAN__
  23400. #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
  23401. uint32x2_t __s0 = __p0; \
  23402. uint32x2_t __s1 = __p1; \
  23403. uint32x2_t __ret; \
  23404. __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
  23405. __ret; \
  23406. })
  23407. #else
  23408. #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
  23409. uint32x2_t __s0 = __p0; \
  23410. uint32x2_t __s1 = __p1; \
  23411. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  23412. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23413. uint32x2_t __ret; \
  23414. __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
  23415. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  23416. __ret; \
  23417. })
  23418. #endif
  23419. #ifdef __LITTLE_ENDIAN__
  23420. #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
  23421. uint64x1_t __s0 = __p0; \
  23422. uint64x1_t __s1 = __p1; \
  23423. uint64x1_t __ret; \
  23424. __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  23425. __ret; \
  23426. })
  23427. #else
  23428. #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
  23429. uint64x1_t __s0 = __p0; \
  23430. uint64x1_t __s1 = __p1; \
  23431. uint64x1_t __ret; \
  23432. __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
  23433. __ret; \
  23434. })
  23435. #endif
  23436. #ifdef __LITTLE_ENDIAN__
  23437. #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
  23438. uint16x4_t __s0 = __p0; \
  23439. uint16x4_t __s1 = __p1; \
  23440. uint16x4_t __ret; \
  23441. __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
  23442. __ret; \
  23443. })
  23444. #else
  23445. #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
  23446. uint16x4_t __s0 = __p0; \
  23447. uint16x4_t __s1 = __p1; \
  23448. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  23449. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23450. uint16x4_t __ret; \
  23451. __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
  23452. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  23453. __ret; \
  23454. })
  23455. #endif
  23456. #ifdef __LITTLE_ENDIAN__
  23457. #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
  23458. int8x8_t __s0 = __p0; \
  23459. int8x8_t __s1 = __p1; \
  23460. int8x8_t __ret; \
  23461. __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
  23462. __ret; \
  23463. })
  23464. #else
  23465. #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
  23466. int8x8_t __s0 = __p0; \
  23467. int8x8_t __s1 = __p1; \
  23468. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  23469. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23470. int8x8_t __ret; \
  23471. __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
  23472. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  23473. __ret; \
  23474. })
  23475. #endif
  23476. #ifdef __LITTLE_ENDIAN__
  23477. #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
  23478. int32x2_t __s0 = __p0; \
  23479. int32x2_t __s1 = __p1; \
  23480. int32x2_t __ret; \
  23481. __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
  23482. __ret; \
  23483. })
  23484. #else
  23485. #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
  23486. int32x2_t __s0 = __p0; \
  23487. int32x2_t __s1 = __p1; \
  23488. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  23489. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23490. int32x2_t __ret; \
  23491. __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
  23492. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  23493. __ret; \
  23494. })
  23495. #endif
  23496. #ifdef __LITTLE_ENDIAN__
  23497. #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
  23498. int64x1_t __s0 = __p0; \
  23499. int64x1_t __s1 = __p1; \
  23500. int64x1_t __ret; \
  23501. __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  23502. __ret; \
  23503. })
  23504. #else
  23505. #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
  23506. int64x1_t __s0 = __p0; \
  23507. int64x1_t __s1 = __p1; \
  23508. int64x1_t __ret; \
  23509. __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
  23510. __ret; \
  23511. })
  23512. #endif
  23513. #ifdef __LITTLE_ENDIAN__
  23514. #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
  23515. int16x4_t __s0 = __p0; \
  23516. int16x4_t __s1 = __p1; \
  23517. int16x4_t __ret; \
  23518. __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
  23519. __ret; \
  23520. })
  23521. #else
  23522. #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
  23523. int16x4_t __s0 = __p0; \
  23524. int16x4_t __s1 = __p1; \
  23525. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  23526. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23527. int16x4_t __ret; \
  23528. __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
  23529. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  23530. __ret; \
  23531. })
  23532. #endif
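/* Editor's note: a minimal usage sketch for the shift-right-and-insert
 * vsri(q)_n_* intrinsics above. VSRI shifts the second operand right by the
 * immediate and copies the high bits of each lane from the first operand.
 * The helper name is illustrative only.
 *
 *   #include <arm_neon.h>
 *
 *   // Keep the top 4 bits of each byte of `hi` and fill the rest with the
 *   // top bits of `lo`: result = (hi & 0xF0) | (lo >> 4) per lane.
 *   uint8x8_t pack_nibbles(uint8x8_t hi, uint8x8_t lo)
 *   {
 *       return vsri_n_u8(hi, lo, 4);
 *   }
 */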
  23533. #ifdef __LITTLE_ENDIAN__
  23534. #define vst1_p8(__p0, __p1) __extension__ ({ \
  23535. poly8x8_t __s1 = __p1; \
  23536. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
  23537. })
  23538. #else
  23539. #define vst1_p8(__p0, __p1) __extension__ ({ \
  23540. poly8x8_t __s1 = __p1; \
  23541. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23542. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
  23543. })
  23544. #endif
  23545. #ifdef __LITTLE_ENDIAN__
  23546. #define vst1_p16(__p0, __p1) __extension__ ({ \
  23547. poly16x4_t __s1 = __p1; \
  23548. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
  23549. })
  23550. #else
  23551. #define vst1_p16(__p0, __p1) __extension__ ({ \
  23552. poly16x4_t __s1 = __p1; \
  23553. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23554. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
  23555. })
  23556. #endif
  23557. #ifdef __LITTLE_ENDIAN__
  23558. #define vst1q_p8(__p0, __p1) __extension__ ({ \
  23559. poly8x16_t __s1 = __p1; \
  23560. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
  23561. })
  23562. #else
  23563. #define vst1q_p8(__p0, __p1) __extension__ ({ \
  23564. poly8x16_t __s1 = __p1; \
  23565. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23566. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
  23567. })
  23568. #endif
  23569. #ifdef __LITTLE_ENDIAN__
  23570. #define vst1q_p16(__p0, __p1) __extension__ ({ \
  23571. poly16x8_t __s1 = __p1; \
  23572. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
  23573. })
  23574. #else
  23575. #define vst1q_p16(__p0, __p1) __extension__ ({ \
  23576. poly16x8_t __s1 = __p1; \
  23577. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23578. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
  23579. })
  23580. #endif
  23581. #ifdef __LITTLE_ENDIAN__
  23582. #define vst1q_u8(__p0, __p1) __extension__ ({ \
  23583. uint8x16_t __s1 = __p1; \
  23584. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
  23585. })
  23586. #else
  23587. #define vst1q_u8(__p0, __p1) __extension__ ({ \
  23588. uint8x16_t __s1 = __p1; \
  23589. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23590. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
  23591. })
  23592. #endif
  23593. #ifdef __LITTLE_ENDIAN__
  23594. #define vst1q_u32(__p0, __p1) __extension__ ({ \
  23595. uint32x4_t __s1 = __p1; \
  23596. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
  23597. })
  23598. #else
  23599. #define vst1q_u32(__p0, __p1) __extension__ ({ \
  23600. uint32x4_t __s1 = __p1; \
  23601. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23602. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
  23603. })
  23604. #endif
  23605. #ifdef __LITTLE_ENDIAN__
  23606. #define vst1q_u64(__p0, __p1) __extension__ ({ \
  23607. uint64x2_t __s1 = __p1; \
  23608. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
  23609. })
  23610. #else
  23611. #define vst1q_u64(__p0, __p1) __extension__ ({ \
  23612. uint64x2_t __s1 = __p1; \
  23613. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23614. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
  23615. })
  23616. #endif
  23617. #ifdef __LITTLE_ENDIAN__
  23618. #define vst1q_u16(__p0, __p1) __extension__ ({ \
  23619. uint16x8_t __s1 = __p1; \
  23620. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
  23621. })
  23622. #else
  23623. #define vst1q_u16(__p0, __p1) __extension__ ({ \
  23624. uint16x8_t __s1 = __p1; \
  23625. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23626. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
  23627. })
  23628. #endif
  23629. #ifdef __LITTLE_ENDIAN__
  23630. #define vst1q_s8(__p0, __p1) __extension__ ({ \
  23631. int8x16_t __s1 = __p1; \
  23632. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
  23633. })
  23634. #else
  23635. #define vst1q_s8(__p0, __p1) __extension__ ({ \
  23636. int8x16_t __s1 = __p1; \
  23637. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23638. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
  23639. })
  23640. #endif
  23641. #ifdef __LITTLE_ENDIAN__
  23642. #define vst1q_f32(__p0, __p1) __extension__ ({ \
  23643. float32x4_t __s1 = __p1; \
  23644. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
  23645. })
  23646. #else
  23647. #define vst1q_f32(__p0, __p1) __extension__ ({ \
  23648. float32x4_t __s1 = __p1; \
  23649. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23650. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
  23651. })
  23652. #endif
  23653. #ifdef __LITTLE_ENDIAN__
  23654. #define vst1q_f16(__p0, __p1) __extension__ ({ \
  23655. float16x8_t __s1 = __p1; \
  23656. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
  23657. })
  23658. #else
  23659. #define vst1q_f16(__p0, __p1) __extension__ ({ \
  23660. float16x8_t __s1 = __p1; \
  23661. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23662. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
  23663. })
  23664. #endif
  23665. #ifdef __LITTLE_ENDIAN__
  23666. #define vst1q_s32(__p0, __p1) __extension__ ({ \
  23667. int32x4_t __s1 = __p1; \
  23668. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
  23669. })
  23670. #else
  23671. #define vst1q_s32(__p0, __p1) __extension__ ({ \
  23672. int32x4_t __s1 = __p1; \
  23673. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23674. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
  23675. })
  23676. #endif
  23677. #ifdef __LITTLE_ENDIAN__
  23678. #define vst1q_s64(__p0, __p1) __extension__ ({ \
  23679. int64x2_t __s1 = __p1; \
  23680. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
  23681. })
  23682. #else
  23683. #define vst1q_s64(__p0, __p1) __extension__ ({ \
  23684. int64x2_t __s1 = __p1; \
  23685. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23686. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
  23687. })
  23688. #endif
  23689. #ifdef __LITTLE_ENDIAN__
  23690. #define vst1q_s16(__p0, __p1) __extension__ ({ \
  23691. int16x8_t __s1 = __p1; \
  23692. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
  23693. })
  23694. #else
  23695. #define vst1q_s16(__p0, __p1) __extension__ ({ \
  23696. int16x8_t __s1 = __p1; \
  23697. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23698. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
  23699. })
  23700. #endif
  23701. #ifdef __LITTLE_ENDIAN__
  23702. #define vst1_u8(__p0, __p1) __extension__ ({ \
  23703. uint8x8_t __s1 = __p1; \
  23704. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
  23705. })
  23706. #else
  23707. #define vst1_u8(__p0, __p1) __extension__ ({ \
  23708. uint8x8_t __s1 = __p1; \
  23709. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23710. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
  23711. })
  23712. #endif
  23713. #ifdef __LITTLE_ENDIAN__
  23714. #define vst1_u32(__p0, __p1) __extension__ ({ \
  23715. uint32x2_t __s1 = __p1; \
  23716. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
  23717. })
  23718. #else
  23719. #define vst1_u32(__p0, __p1) __extension__ ({ \
  23720. uint32x2_t __s1 = __p1; \
  23721. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23722. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
  23723. })
  23724. #endif
  23725. #ifdef __LITTLE_ENDIAN__
  23726. #define vst1_u64(__p0, __p1) __extension__ ({ \
  23727. uint64x1_t __s1 = __p1; \
  23728. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
  23729. })
  23730. #else
  23731. #define vst1_u64(__p0, __p1) __extension__ ({ \
  23732. uint64x1_t __s1 = __p1; \
  23733. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
  23734. })
  23735. #endif
  23736. #ifdef __LITTLE_ENDIAN__
  23737. #define vst1_u16(__p0, __p1) __extension__ ({ \
  23738. uint16x4_t __s1 = __p1; \
  23739. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
  23740. })
  23741. #else
  23742. #define vst1_u16(__p0, __p1) __extension__ ({ \
  23743. uint16x4_t __s1 = __p1; \
  23744. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23745. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
  23746. })
  23747. #endif
  23748. #ifdef __LITTLE_ENDIAN__
  23749. #define vst1_s8(__p0, __p1) __extension__ ({ \
  23750. int8x8_t __s1 = __p1; \
  23751. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
  23752. })
  23753. #else
  23754. #define vst1_s8(__p0, __p1) __extension__ ({ \
  23755. int8x8_t __s1 = __p1; \
  23756. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23757. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
  23758. })
  23759. #endif
  23760. #ifdef __LITTLE_ENDIAN__
  23761. #define vst1_f32(__p0, __p1) __extension__ ({ \
  23762. float32x2_t __s1 = __p1; \
  23763. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
  23764. })
  23765. #else
  23766. #define vst1_f32(__p0, __p1) __extension__ ({ \
  23767. float32x2_t __s1 = __p1; \
  23768. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23769. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
  23770. })
  23771. #endif
  23772. #ifdef __LITTLE_ENDIAN__
  23773. #define vst1_f16(__p0, __p1) __extension__ ({ \
  23774. float16x4_t __s1 = __p1; \
  23775. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
  23776. })
  23777. #else
  23778. #define vst1_f16(__p0, __p1) __extension__ ({ \
  23779. float16x4_t __s1 = __p1; \
  23780. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23781. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
  23782. })
  23783. #endif
  23784. #ifdef __LITTLE_ENDIAN__
  23785. #define vst1_s32(__p0, __p1) __extension__ ({ \
  23786. int32x2_t __s1 = __p1; \
  23787. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
  23788. })
  23789. #else
  23790. #define vst1_s32(__p0, __p1) __extension__ ({ \
  23791. int32x2_t __s1 = __p1; \
  23792. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23793. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
  23794. })
  23795. #endif
  23796. #ifdef __LITTLE_ENDIAN__
  23797. #define vst1_s64(__p0, __p1) __extension__ ({ \
  23798. int64x1_t __s1 = __p1; \
  23799. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
  23800. })
  23801. #else
  23802. #define vst1_s64(__p0, __p1) __extension__ ({ \
  23803. int64x1_t __s1 = __p1; \
  23804. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
  23805. })
  23806. #endif
  23807. #ifdef __LITTLE_ENDIAN__
  23808. #define vst1_s16(__p0, __p1) __extension__ ({ \
  23809. int16x4_t __s1 = __p1; \
  23810. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
  23811. })
  23812. #else
  23813. #define vst1_s16(__p0, __p1) __extension__ ({ \
  23814. int16x4_t __s1 = __p1; \
  23815. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23816. __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
  23817. })
  23818. #endif
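/* The vst1_* / vst1q_* macros above store a whole 64-bit or 128-bit vector
 * to memory. Illustrative sketch (not part of the generated definitions):
 *
 *   #include <arm_neon.h>
 *   static void example_vst1(float *buf) {
 *       float32x4_t v = vdupq_n_f32(1.0f);  // splat 1.0f into all four lanes
 *       vst1q_f32(buf, v);                  // buf[0..3] == 1.0f
 *   }
 */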
  23819. #ifdef __LITTLE_ENDIAN__
  23820. #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  23821. poly8x8_t __s1 = __p1; \
  23822. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
  23823. })
  23824. #else
  23825. #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  23826. poly8x8_t __s1 = __p1; \
  23827. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23828. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
  23829. })
  23830. #endif
  23831. #ifdef __LITTLE_ENDIAN__
  23832. #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  23833. poly16x4_t __s1 = __p1; \
  23834. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
  23835. })
  23836. #else
  23837. #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  23838. poly16x4_t __s1 = __p1; \
  23839. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23840. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
  23841. })
  23842. #endif
  23843. #ifdef __LITTLE_ENDIAN__
  23844. #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  23845. poly8x16_t __s1 = __p1; \
  23846. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
  23847. })
  23848. #else
  23849. #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  23850. poly8x16_t __s1 = __p1; \
  23851. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23852. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
  23853. })
  23854. #endif
  23855. #ifdef __LITTLE_ENDIAN__
  23856. #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  23857. poly16x8_t __s1 = __p1; \
  23858. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
  23859. })
  23860. #else
  23861. #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  23862. poly16x8_t __s1 = __p1; \
  23863. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23864. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
  23865. })
  23866. #endif
  23867. #ifdef __LITTLE_ENDIAN__
  23868. #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  23869. uint8x16_t __s1 = __p1; \
  23870. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
  23871. })
  23872. #else
  23873. #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  23874. uint8x16_t __s1 = __p1; \
  23875. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23876. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
  23877. })
  23878. #endif
  23879. #ifdef __LITTLE_ENDIAN__
  23880. #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  23881. uint32x4_t __s1 = __p1; \
  23882. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
  23883. })
  23884. #else
  23885. #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  23886. uint32x4_t __s1 = __p1; \
  23887. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23888. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
  23889. })
  23890. #endif
  23891. #ifdef __LITTLE_ENDIAN__
  23892. #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  23893. uint64x2_t __s1 = __p1; \
  23894. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
  23895. })
  23896. #else
  23897. #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  23898. uint64x2_t __s1 = __p1; \
  23899. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23900. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
  23901. })
  23902. #endif
  23903. #ifdef __LITTLE_ENDIAN__
  23904. #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  23905. uint16x8_t __s1 = __p1; \
  23906. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
  23907. })
  23908. #else
  23909. #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  23910. uint16x8_t __s1 = __p1; \
  23911. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23912. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
  23913. })
  23914. #endif
  23915. #ifdef __LITTLE_ENDIAN__
  23916. #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  23917. int8x16_t __s1 = __p1; \
  23918. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
  23919. })
  23920. #else
  23921. #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  23922. int8x16_t __s1 = __p1; \
  23923. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  23924. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
  23925. })
  23926. #endif
  23927. #ifdef __LITTLE_ENDIAN__
  23928. #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  23929. float32x4_t __s1 = __p1; \
  23930. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
  23931. })
  23932. #else
  23933. #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  23934. float32x4_t __s1 = __p1; \
  23935. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23936. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
  23937. })
  23938. #endif
  23939. #ifdef __LITTLE_ENDIAN__
  23940. #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  23941. float16x8_t __s1 = __p1; \
  23942. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
  23943. })
  23944. #else
  23945. #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  23946. float16x8_t __s1 = __p1; \
  23947. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23948. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
  23949. })
  23950. #endif
  23951. #ifdef __LITTLE_ENDIAN__
  23952. #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  23953. int32x4_t __s1 = __p1; \
  23954. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
  23955. })
  23956. #else
  23957. #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  23958. int32x4_t __s1 = __p1; \
  23959. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  23960. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
  23961. })
  23962. #endif
  23963. #ifdef __LITTLE_ENDIAN__
  23964. #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  23965. int64x2_t __s1 = __p1; \
  23966. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
  23967. })
  23968. #else
  23969. #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  23970. int64x2_t __s1 = __p1; \
  23971. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  23972. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
  23973. })
  23974. #endif
  23975. #ifdef __LITTLE_ENDIAN__
  23976. #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  23977. int16x8_t __s1 = __p1; \
  23978. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
  23979. })
  23980. #else
  23981. #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  23982. int16x8_t __s1 = __p1; \
  23983. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23984. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
  23985. })
  23986. #endif
  23987. #ifdef __LITTLE_ENDIAN__
  23988. #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  23989. uint8x8_t __s1 = __p1; \
  23990. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
  23991. })
  23992. #else
  23993. #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  23994. uint8x8_t __s1 = __p1; \
  23995. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  23996. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
  23997. })
  23998. #endif
  23999. #ifdef __LITTLE_ENDIAN__
  24000. #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  24001. uint32x2_t __s1 = __p1; \
  24002. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
  24003. })
  24004. #else
  24005. #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  24006. uint32x2_t __s1 = __p1; \
  24007. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  24008. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
  24009. })
  24010. #endif
  24011. #ifdef __LITTLE_ENDIAN__
  24012. #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  24013. uint64x1_t __s1 = __p1; \
  24014. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
  24015. })
  24016. #else
  24017. #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  24018. uint64x1_t __s1 = __p1; \
  24019. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
  24020. })
  24021. #endif
  24022. #ifdef __LITTLE_ENDIAN__
  24023. #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  24024. uint16x4_t __s1 = __p1; \
  24025. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
  24026. })
  24027. #else
  24028. #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  24029. uint16x4_t __s1 = __p1; \
  24030. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  24031. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
  24032. })
  24033. #endif
  24034. #ifdef __LITTLE_ENDIAN__
  24035. #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  24036. int8x8_t __s1 = __p1; \
  24037. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
  24038. })
  24039. #else
  24040. #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  24041. int8x8_t __s1 = __p1; \
  24042. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  24043. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
  24044. })
  24045. #endif
  24046. #ifdef __LITTLE_ENDIAN__
  24047. #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  24048. float32x2_t __s1 = __p1; \
  24049. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
  24050. })
  24051. #else
  24052. #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  24053. float32x2_t __s1 = __p1; \
  24054. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  24055. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
  24056. })
  24057. #endif
  24058. #ifdef __LITTLE_ENDIAN__
  24059. #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  24060. float16x4_t __s1 = __p1; \
  24061. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
  24062. })
  24063. #else
  24064. #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  24065. float16x4_t __s1 = __p1; \
  24066. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  24067. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
  24068. })
  24069. #endif
  24070. #ifdef __LITTLE_ENDIAN__
  24071. #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  24072. int32x2_t __s1 = __p1; \
  24073. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
  24074. })
  24075. #else
  24076. #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  24077. int32x2_t __s1 = __p1; \
  24078. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  24079. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
  24080. })
  24081. #endif
  24082. #ifdef __LITTLE_ENDIAN__
  24083. #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  24084. int64x1_t __s1 = __p1; \
  24085. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
  24086. })
  24087. #else
  24088. #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  24089. int64x1_t __s1 = __p1; \
  24090. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
  24091. })
  24092. #endif
  24093. #ifdef __LITTLE_ENDIAN__
  24094. #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  24095. int16x4_t __s1 = __p1; \
  24096. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
  24097. })
  24098. #else
  24099. #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  24100. int16x4_t __s1 = __p1; \
  24101. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  24102. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
  24103. })
  24104. #endif
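/* The vst1_lane_* / vst1q_lane_* macros above store a single lane; the lane
 * index must be an integer constant in range for the vector type.
 * Illustrative sketch (not part of the generated definitions):
 *
 *   #include <arm_neon.h>
 *   static void example_vst1_lane(uint32_t *out) {
 *       uint32x2_t v = vdup_n_u32(7u);
 *       vst1_lane_u32(out, v, 1);   // stores lane 1, so *out == 7
 *   }
 */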
  24105. #ifdef __LITTLE_ENDIAN__
  24106. #define vst2_p8(__p0, __p1) __extension__ ({ \
  24107. poly8x8x2_t __s1 = __p1; \
  24108. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
  24109. })
  24110. #else
  24111. #define vst2_p8(__p0, __p1) __extension__ ({ \
  24112. poly8x8x2_t __s1 = __p1; \
  24113. poly8x8x2_t __rev1; \
  24114. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24115. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24116. __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
  24117. })
  24118. #endif
  24119. #ifdef __LITTLE_ENDIAN__
  24120. #define vst2_p16(__p0, __p1) __extension__ ({ \
  24121. poly16x4x2_t __s1 = __p1; \
  24122. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
  24123. })
  24124. #else
  24125. #define vst2_p16(__p0, __p1) __extension__ ({ \
  24126. poly16x4x2_t __s1 = __p1; \
  24127. poly16x4x2_t __rev1; \
  24128. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24129. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24130. __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
  24131. })
  24132. #endif
  24133. #ifdef __LITTLE_ENDIAN__
  24134. #define vst2q_p8(__p0, __p1) __extension__ ({ \
  24135. poly8x16x2_t __s1 = __p1; \
  24136. __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
  24137. })
  24138. #else
  24139. #define vst2q_p8(__p0, __p1) __extension__ ({ \
  24140. poly8x16x2_t __s1 = __p1; \
  24141. poly8x16x2_t __rev1; \
  24142. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24143. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24144. __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
  24145. })
  24146. #endif
  24147. #ifdef __LITTLE_ENDIAN__
  24148. #define vst2q_p16(__p0, __p1) __extension__ ({ \
  24149. poly16x8x2_t __s1 = __p1; \
  24150. __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
  24151. })
  24152. #else
  24153. #define vst2q_p16(__p0, __p1) __extension__ ({ \
  24154. poly16x8x2_t __s1 = __p1; \
  24155. poly16x8x2_t __rev1; \
  24156. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24157. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24158. __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
  24159. })
  24160. #endif
  24161. #ifdef __LITTLE_ENDIAN__
  24162. #define vst2q_u8(__p0, __p1) __extension__ ({ \
  24163. uint8x16x2_t __s1 = __p1; \
  24164. __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
  24165. })
  24166. #else
  24167. #define vst2q_u8(__p0, __p1) __extension__ ({ \
  24168. uint8x16x2_t __s1 = __p1; \
  24169. uint8x16x2_t __rev1; \
  24170. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24171. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24172. __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
  24173. })
  24174. #endif
  24175. #ifdef __LITTLE_ENDIAN__
  24176. #define vst2q_u32(__p0, __p1) __extension__ ({ \
  24177. uint32x4x2_t __s1 = __p1; \
  24178. __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
  24179. })
  24180. #else
  24181. #define vst2q_u32(__p0, __p1) __extension__ ({ \
  24182. uint32x4x2_t __s1 = __p1; \
  24183. uint32x4x2_t __rev1; \
  24184. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24185. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24186. __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
  24187. })
  24188. #endif
  24189. #ifdef __LITTLE_ENDIAN__
  24190. #define vst2q_u16(__p0, __p1) __extension__ ({ \
  24191. uint16x8x2_t __s1 = __p1; \
  24192. __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
  24193. })
  24194. #else
  24195. #define vst2q_u16(__p0, __p1) __extension__ ({ \
  24196. uint16x8x2_t __s1 = __p1; \
  24197. uint16x8x2_t __rev1; \
  24198. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24199. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24200. __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
  24201. })
  24202. #endif
  24203. #ifdef __LITTLE_ENDIAN__
  24204. #define vst2q_s8(__p0, __p1) __extension__ ({ \
  24205. int8x16x2_t __s1 = __p1; \
  24206. __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
  24207. })
  24208. #else
  24209. #define vst2q_s8(__p0, __p1) __extension__ ({ \
  24210. int8x16x2_t __s1 = __p1; \
  24211. int8x16x2_t __rev1; \
  24212. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24213. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24214. __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
  24215. })
  24216. #endif
  24217. #ifdef __LITTLE_ENDIAN__
  24218. #define vst2q_f32(__p0, __p1) __extension__ ({ \
  24219. float32x4x2_t __s1 = __p1; \
  24220. __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
  24221. })
  24222. #else
  24223. #define vst2q_f32(__p0, __p1) __extension__ ({ \
  24224. float32x4x2_t __s1 = __p1; \
  24225. float32x4x2_t __rev1; \
  24226. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24227. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24228. __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
  24229. })
  24230. #endif
  24231. #ifdef __LITTLE_ENDIAN__
  24232. #define vst2q_f16(__p0, __p1) __extension__ ({ \
  24233. float16x8x2_t __s1 = __p1; \
  24234. __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
  24235. })
  24236. #else
  24237. #define vst2q_f16(__p0, __p1) __extension__ ({ \
  24238. float16x8x2_t __s1 = __p1; \
  24239. float16x8x2_t __rev1; \
  24240. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24241. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24242. __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
  24243. })
  24244. #endif
  24245. #ifdef __LITTLE_ENDIAN__
  24246. #define vst2q_s32(__p0, __p1) __extension__ ({ \
  24247. int32x4x2_t __s1 = __p1; \
  24248. __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
  24249. })
  24250. #else
  24251. #define vst2q_s32(__p0, __p1) __extension__ ({ \
  24252. int32x4x2_t __s1 = __p1; \
  24253. int32x4x2_t __rev1; \
  24254. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24255. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24256. __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
  24257. })
  24258. #endif
  24259. #ifdef __LITTLE_ENDIAN__
  24260. #define vst2q_s16(__p0, __p1) __extension__ ({ \
  24261. int16x8x2_t __s1 = __p1; \
  24262. __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
  24263. })
  24264. #else
  24265. #define vst2q_s16(__p0, __p1) __extension__ ({ \
  24266. int16x8x2_t __s1 = __p1; \
  24267. int16x8x2_t __rev1; \
  24268. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24269. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24270. __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
  24271. })
  24272. #endif
  24273. #ifdef __LITTLE_ENDIAN__
  24274. #define vst2_u8(__p0, __p1) __extension__ ({ \
  24275. uint8x8x2_t __s1 = __p1; \
  24276. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
  24277. })
  24278. #else
  24279. #define vst2_u8(__p0, __p1) __extension__ ({ \
  24280. uint8x8x2_t __s1 = __p1; \
  24281. uint8x8x2_t __rev1; \
  24282. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24283. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24284. __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
  24285. })
  24286. #endif
  24287. #ifdef __LITTLE_ENDIAN__
  24288. #define vst2_u32(__p0, __p1) __extension__ ({ \
  24289. uint32x2x2_t __s1 = __p1; \
  24290. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
  24291. })
  24292. #else
  24293. #define vst2_u32(__p0, __p1) __extension__ ({ \
  24294. uint32x2x2_t __s1 = __p1; \
  24295. uint32x2x2_t __rev1; \
  24296. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24297. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24298. __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
  24299. })
  24300. #endif
  24301. #ifdef __LITTLE_ENDIAN__
  24302. #define vst2_u64(__p0, __p1) __extension__ ({ \
  24303. uint64x1x2_t __s1 = __p1; \
  24304. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
  24305. })
  24306. #else
  24307. #define vst2_u64(__p0, __p1) __extension__ ({ \
  24308. uint64x1x2_t __s1 = __p1; \
  24309. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
  24310. })
  24311. #endif
  24312. #ifdef __LITTLE_ENDIAN__
  24313. #define vst2_u16(__p0, __p1) __extension__ ({ \
  24314. uint16x4x2_t __s1 = __p1; \
  24315. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
  24316. })
  24317. #else
  24318. #define vst2_u16(__p0, __p1) __extension__ ({ \
  24319. uint16x4x2_t __s1 = __p1; \
  24320. uint16x4x2_t __rev1; \
  24321. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24322. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24323. __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
  24324. })
  24325. #endif
  24326. #ifdef __LITTLE_ENDIAN__
  24327. #define vst2_s8(__p0, __p1) __extension__ ({ \
  24328. int8x8x2_t __s1 = __p1; \
  24329. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
  24330. })
  24331. #else
  24332. #define vst2_s8(__p0, __p1) __extension__ ({ \
  24333. int8x8x2_t __s1 = __p1; \
  24334. int8x8x2_t __rev1; \
  24335. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24336. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24337. __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
  24338. })
  24339. #endif
  24340. #ifdef __LITTLE_ENDIAN__
  24341. #define vst2_f32(__p0, __p1) __extension__ ({ \
  24342. float32x2x2_t __s1 = __p1; \
  24343. __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
  24344. })
  24345. #else
  24346. #define vst2_f32(__p0, __p1) __extension__ ({ \
  24347. float32x2x2_t __s1 = __p1; \
  24348. float32x2x2_t __rev1; \
  24349. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24350. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24351. __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
  24352. })
  24353. #endif
  24354. #ifdef __LITTLE_ENDIAN__
  24355. #define vst2_f16(__p0, __p1) __extension__ ({ \
  24356. float16x4x2_t __s1 = __p1; \
  24357. __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
  24358. })
  24359. #else
  24360. #define vst2_f16(__p0, __p1) __extension__ ({ \
  24361. float16x4x2_t __s1 = __p1; \
  24362. float16x4x2_t __rev1; \
  24363. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24364. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24365. __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
  24366. })
  24367. #endif
  24368. #ifdef __LITTLE_ENDIAN__
  24369. #define vst2_s32(__p0, __p1) __extension__ ({ \
  24370. int32x2x2_t __s1 = __p1; \
  24371. __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
  24372. })
  24373. #else
  24374. #define vst2_s32(__p0, __p1) __extension__ ({ \
  24375. int32x2x2_t __s1 = __p1; \
  24376. int32x2x2_t __rev1; \
  24377. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24378. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24379. __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
  24380. })
  24381. #endif
  24382. #ifdef __LITTLE_ENDIAN__
  24383. #define vst2_s64(__p0, __p1) __extension__ ({ \
  24384. int64x1x2_t __s1 = __p1; \
  24385. __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
  24386. })
  24387. #else
  24388. #define vst2_s64(__p0, __p1) __extension__ ({ \
  24389. int64x1x2_t __s1 = __p1; \
  24390. __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
  24391. })
  24392. #endif
  24393. #ifdef __LITTLE_ENDIAN__
  24394. #define vst2_s16(__p0, __p1) __extension__ ({ \
  24395. int16x4x2_t __s1 = __p1; \
  24396. __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
  24397. })
  24398. #else
  24399. #define vst2_s16(__p0, __p1) __extension__ ({ \
  24400. int16x4x2_t __s1 = __p1; \
  24401. int16x4x2_t __rev1; \
  24402. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24403. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24404. __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
  24405. })
  24406. #endif
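/* The vst2_* / vst2q_* macros above perform a 2-way interleaving store: the
 * two vectors in the x2 structure are written element by element in
 * alternation. Illustrative sketch (not part of the generated definitions):
 *
 *   #include <arm_neon.h>
 *   static void example_vst2(uint8_t out[16]) {
 *       uint8x8x2_t pair;
 *       pair.val[0] = vdup_n_u8(0xAA);
 *       pair.val[1] = vdup_n_u8(0xBB);
 *       vst2_u8(out, pair);   // out == {0xAA, 0xBB, 0xAA, 0xBB, ...}
 *   }
 */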
  24407. #ifdef __LITTLE_ENDIAN__
  24408. #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  24409. poly8x8x2_t __s1 = __p1; \
  24410. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
  24411. })
  24412. #else
  24413. #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  24414. poly8x8x2_t __s1 = __p1; \
  24415. poly8x8x2_t __rev1; \
  24416. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24417. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24418. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
  24419. })
  24420. #endif
  24421. #ifdef __LITTLE_ENDIAN__
  24422. #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  24423. poly16x4x2_t __s1 = __p1; \
  24424. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
  24425. })
  24426. #else
  24427. #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  24428. poly16x4x2_t __s1 = __p1; \
  24429. poly16x4x2_t __rev1; \
  24430. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24431. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24432. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
  24433. })
  24434. #endif
  24435. #ifdef __LITTLE_ENDIAN__
  24436. #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  24437. poly16x8x2_t __s1 = __p1; \
  24438. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
  24439. })
  24440. #else
  24441. #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  24442. poly16x8x2_t __s1 = __p1; \
  24443. poly16x8x2_t __rev1; \
  24444. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24445. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24446. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
  24447. })
  24448. #endif
  24449. #ifdef __LITTLE_ENDIAN__
  24450. #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  24451. uint32x4x2_t __s1 = __p1; \
  24452. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
  24453. })
  24454. #else
  24455. #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  24456. uint32x4x2_t __s1 = __p1; \
  24457. uint32x4x2_t __rev1; \
  24458. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24459. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24460. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
  24461. })
  24462. #endif
  24463. #ifdef __LITTLE_ENDIAN__
  24464. #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  24465. uint16x8x2_t __s1 = __p1; \
  24466. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
  24467. })
  24468. #else
  24469. #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  24470. uint16x8x2_t __s1 = __p1; \
  24471. uint16x8x2_t __rev1; \
  24472. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24473. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24474. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
  24475. })
  24476. #endif
  24477. #ifdef __LITTLE_ENDIAN__
  24478. #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  24479. float32x4x2_t __s1 = __p1; \
  24480. __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
  24481. })
  24482. #else
  24483. #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  24484. float32x4x2_t __s1 = __p1; \
  24485. float32x4x2_t __rev1; \
  24486. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24487. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24488. __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
  24489. })
  24490. #endif
  24491. #ifdef __LITTLE_ENDIAN__
  24492. #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  24493. float16x8x2_t __s1 = __p1; \
  24494. __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
  24495. })
  24496. #else
  24497. #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  24498. float16x8x2_t __s1 = __p1; \
  24499. float16x8x2_t __rev1; \
  24500. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24501. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24502. __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
  24503. })
  24504. #endif
  24505. #ifdef __LITTLE_ENDIAN__
  24506. #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  24507. int32x4x2_t __s1 = __p1; \
  24508. __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
  24509. })
  24510. #else
  24511. #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  24512. int32x4x2_t __s1 = __p1; \
  24513. int32x4x2_t __rev1; \
  24514. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24515. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24516. __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
  24517. })
  24518. #endif
  24519. #ifdef __LITTLE_ENDIAN__
  24520. #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  24521. int16x8x2_t __s1 = __p1; \
  24522. __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
  24523. })
  24524. #else
  24525. #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  24526. int16x8x2_t __s1 = __p1; \
  24527. int16x8x2_t __rev1; \
  24528. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24529. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24530. __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
  24531. })
  24532. #endif
  24533. #ifdef __LITTLE_ENDIAN__
  24534. #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  24535. uint8x8x2_t __s1 = __p1; \
  24536. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
  24537. })
  24538. #else
  24539. #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  24540. uint8x8x2_t __s1 = __p1; \
  24541. uint8x8x2_t __rev1; \
  24542. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24543. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24544. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
  24545. })
  24546. #endif
  24547. #ifdef __LITTLE_ENDIAN__
  24548. #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  24549. uint32x2x2_t __s1 = __p1; \
  24550. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
  24551. })
  24552. #else
  24553. #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  24554. uint32x2x2_t __s1 = __p1; \
  24555. uint32x2x2_t __rev1; \
  24556. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24557. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24558. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
  24559. })
  24560. #endif
  24561. #ifdef __LITTLE_ENDIAN__
  24562. #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  24563. uint16x4x2_t __s1 = __p1; \
  24564. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
  24565. })
  24566. #else
  24567. #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  24568. uint16x4x2_t __s1 = __p1; \
  24569. uint16x4x2_t __rev1; \
  24570. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24571. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24572. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
  24573. })
  24574. #endif
  24575. #ifdef __LITTLE_ENDIAN__
  24576. #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  24577. int8x8x2_t __s1 = __p1; \
  24578. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
  24579. })
  24580. #else
  24581. #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  24582. int8x8x2_t __s1 = __p1; \
  24583. int8x8x2_t __rev1; \
  24584. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24585. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24586. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
  24587. })
  24588. #endif
  24589. #ifdef __LITTLE_ENDIAN__
  24590. #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  24591. float32x2x2_t __s1 = __p1; \
  24592. __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
  24593. })
  24594. #else
  24595. #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  24596. float32x2x2_t __s1 = __p1; \
  24597. float32x2x2_t __rev1; \
  24598. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24599. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24600. __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
  24601. })
  24602. #endif
  24603. #ifdef __LITTLE_ENDIAN__
  24604. #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  24605. float16x4x2_t __s1 = __p1; \
  24606. __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
  24607. })
  24608. #else
  24609. #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  24610. float16x4x2_t __s1 = __p1; \
  24611. float16x4x2_t __rev1; \
  24612. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24613. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24614. __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
  24615. })
  24616. #endif
  24617. #ifdef __LITTLE_ENDIAN__
  24618. #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  24619. int32x2x2_t __s1 = __p1; \
  24620. __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
  24621. })
  24622. #else
  24623. #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  24624. int32x2x2_t __s1 = __p1; \
  24625. int32x2x2_t __rev1; \
  24626. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24627. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24628. __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
  24629. })
  24630. #endif
  24631. #ifdef __LITTLE_ENDIAN__
  24632. #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  24633. int16x4x2_t __s1 = __p1; \
  24634. __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
  24635. })
  24636. #else
  24637. #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  24638. int16x4x2_t __s1 = __p1; \
  24639. int16x4x2_t __rev1; \
  24640. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24641. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24642. __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
  24643. })
  24644. #endif
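/* The vst2_lane_* macros above store one interleaved pair taken from a single
 * lane of each vector; the vst3_* macros that follow extend the same pattern
 * to three vectors. Illustrative sketch (not part of the generated
 * definitions):
 *
 *   #include <arm_neon.h>
 *   static void example_vst2_lane(uint16_t out[2]) {
 *       uint16x4x2_t pair;
 *       pair.val[0] = vdup_n_u16(1);
 *       pair.val[1] = vdup_n_u16(2);
 *       vst2_lane_u16(out, pair, 3);   // stores lane 3 of each: out == {1, 2}
 *   }
 */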
  24645. #ifdef __LITTLE_ENDIAN__
  24646. #define vst3_p8(__p0, __p1) __extension__ ({ \
  24647. poly8x8x3_t __s1 = __p1; \
  24648. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
  24649. })
  24650. #else
  24651. #define vst3_p8(__p0, __p1) __extension__ ({ \
  24652. poly8x8x3_t __s1 = __p1; \
  24653. poly8x8x3_t __rev1; \
  24654. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24655. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24656. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  24657. __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
  24658. })
  24659. #endif
  24660. #ifdef __LITTLE_ENDIAN__
  24661. #define vst3_p16(__p0, __p1) __extension__ ({ \
  24662. poly16x4x3_t __s1 = __p1; \
  24663. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
  24664. })
  24665. #else
  24666. #define vst3_p16(__p0, __p1) __extension__ ({ \
  24667. poly16x4x3_t __s1 = __p1; \
  24668. poly16x4x3_t __rev1; \
  24669. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24670. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24671. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  24672. __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
  24673. })
  24674. #endif
  24675. #ifdef __LITTLE_ENDIAN__
  24676. #define vst3q_p8(__p0, __p1) __extension__ ({ \
  24677. poly8x16x3_t __s1 = __p1; \
  24678. __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
  24679. })
  24680. #else
  24681. #define vst3q_p8(__p0, __p1) __extension__ ({ \
  24682. poly8x16x3_t __s1 = __p1; \
  24683. poly8x16x3_t __rev1; \
  24684. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24685. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24686. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24687. __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
  24688. })
  24689. #endif
  24690. #ifdef __LITTLE_ENDIAN__
  24691. #define vst3q_p16(__p0, __p1) __extension__ ({ \
  24692. poly16x8x3_t __s1 = __p1; \
  24693. __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
  24694. })
  24695. #else
  24696. #define vst3q_p16(__p0, __p1) __extension__ ({ \
  24697. poly16x8x3_t __s1 = __p1; \
  24698. poly16x8x3_t __rev1; \
  24699. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24700. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24701. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  24702. __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
  24703. })
  24704. #endif
  24705. #ifdef __LITTLE_ENDIAN__
  24706. #define vst3q_u8(__p0, __p1) __extension__ ({ \
  24707. uint8x16x3_t __s1 = __p1; \
  24708. __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
  24709. })
  24710. #else
  24711. #define vst3q_u8(__p0, __p1) __extension__ ({ \
  24712. uint8x16x3_t __s1 = __p1; \
  24713. uint8x16x3_t __rev1; \
  24714. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24715. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24716. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24717. __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
  24718. })
  24719. #endif
  24720. #ifdef __LITTLE_ENDIAN__
  24721. #define vst3q_u32(__p0, __p1) __extension__ ({ \
  24722. uint32x4x3_t __s1 = __p1; \
  24723. __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
  24724. })
  24725. #else
  24726. #define vst3q_u32(__p0, __p1) __extension__ ({ \
  24727. uint32x4x3_t __s1 = __p1; \
  24728. uint32x4x3_t __rev1; \
  24729. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24730. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24731. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  24732. __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
  24733. })
  24734. #endif
  24735. #ifdef __LITTLE_ENDIAN__
  24736. #define vst3q_u16(__p0, __p1) __extension__ ({ \
  24737. uint16x8x3_t __s1 = __p1; \
  24738. __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
  24739. })
  24740. #else
  24741. #define vst3q_u16(__p0, __p1) __extension__ ({ \
  24742. uint16x8x3_t __s1 = __p1; \
  24743. uint16x8x3_t __rev1; \
  24744. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24745. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24746. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  24747. __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
  24748. })
  24749. #endif
  24750. #ifdef __LITTLE_ENDIAN__
  24751. #define vst3q_s8(__p0, __p1) __extension__ ({ \
  24752. int8x16x3_t __s1 = __p1; \
  24753. __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
  24754. })
  24755. #else
  24756. #define vst3q_s8(__p0, __p1) __extension__ ({ \
  24757. int8x16x3_t __s1 = __p1; \
  24758. int8x16x3_t __rev1; \
  24759. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24760. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24761. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  24762. __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
  24763. })
  24764. #endif
  24765. #ifdef __LITTLE_ENDIAN__
  24766. #define vst3q_f32(__p0, __p1) __extension__ ({ \
  24767. float32x4x3_t __s1 = __p1; \
  24768. __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
  24769. })
  24770. #else
  24771. #define vst3q_f32(__p0, __p1) __extension__ ({ \
  24772. float32x4x3_t __s1 = __p1; \
  24773. float32x4x3_t __rev1; \
  24774. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24775. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24776. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  24777. __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
  24778. })
  24779. #endif
  24780. #ifdef __LITTLE_ENDIAN__
  24781. #define vst3q_f16(__p0, __p1) __extension__ ({ \
  24782. float16x8x3_t __s1 = __p1; \
  24783. __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
  24784. })
  24785. #else
  24786. #define vst3q_f16(__p0, __p1) __extension__ ({ \
  24787. float16x8x3_t __s1 = __p1; \
  24788. float16x8x3_t __rev1; \
  24789. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24790. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24791. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  24792. __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
  24793. })
  24794. #endif
  24795. #ifdef __LITTLE_ENDIAN__
  24796. #define vst3q_s32(__p0, __p1) __extension__ ({ \
  24797. int32x4x3_t __s1 = __p1; \
  24798. __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
  24799. })
  24800. #else
  24801. #define vst3q_s32(__p0, __p1) __extension__ ({ \
  24802. int32x4x3_t __s1 = __p1; \
  24803. int32x4x3_t __rev1; \
  24804. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24805. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24806. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  24807. __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
  24808. })
  24809. #endif
  24810. #ifdef __LITTLE_ENDIAN__
  24811. #define vst3q_s16(__p0, __p1) __extension__ ({ \
  24812. int16x8x3_t __s1 = __p1; \
  24813. __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
  24814. })
  24815. #else
  24816. #define vst3q_s16(__p0, __p1) __extension__ ({ \
  24817. int16x8x3_t __s1 = __p1; \
  24818. int16x8x3_t __rev1; \
  24819. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24820. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24821. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  24822. __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
  24823. })
  24824. #endif
  24825. #ifdef __LITTLE_ENDIAN__
  24826. #define vst3_u8(__p0, __p1) __extension__ ({ \
  24827. uint8x8x3_t __s1 = __p1; \
  24828. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
  24829. })
  24830. #else
  24831. #define vst3_u8(__p0, __p1) __extension__ ({ \
  24832. uint8x8x3_t __s1 = __p1; \
  24833. uint8x8x3_t __rev1; \
  24834. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24835. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24836. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  24837. __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
  24838. })
  24839. #endif
  24840. #ifdef __LITTLE_ENDIAN__
  24841. #define vst3_u32(__p0, __p1) __extension__ ({ \
  24842. uint32x2x3_t __s1 = __p1; \
  24843. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
  24844. })
  24845. #else
  24846. #define vst3_u32(__p0, __p1) __extension__ ({ \
  24847. uint32x2x3_t __s1 = __p1; \
  24848. uint32x2x3_t __rev1; \
  24849. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24850. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24851. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  24852. __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
  24853. })
  24854. #endif
  24855. #ifdef __LITTLE_ENDIAN__
  24856. #define vst3_u64(__p0, __p1) __extension__ ({ \
  24857. uint64x1x3_t __s1 = __p1; \
  24858. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
  24859. })
  24860. #else
  24861. #define vst3_u64(__p0, __p1) __extension__ ({ \
  24862. uint64x1x3_t __s1 = __p1; \
  24863. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
  24864. })
  24865. #endif
  24866. #ifdef __LITTLE_ENDIAN__
  24867. #define vst3_u16(__p0, __p1) __extension__ ({ \
  24868. uint16x4x3_t __s1 = __p1; \
  24869. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
  24870. })
  24871. #else
  24872. #define vst3_u16(__p0, __p1) __extension__ ({ \
  24873. uint16x4x3_t __s1 = __p1; \
  24874. uint16x4x3_t __rev1; \
  24875. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24876. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24877. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  24878. __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
  24879. })
  24880. #endif
  24881. #ifdef __LITTLE_ENDIAN__
  24882. #define vst3_s8(__p0, __p1) __extension__ ({ \
  24883. int8x8x3_t __s1 = __p1; \
  24884. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
  24885. })
  24886. #else
  24887. #define vst3_s8(__p0, __p1) __extension__ ({ \
  24888. int8x8x3_t __s1 = __p1; \
  24889. int8x8x3_t __rev1; \
  24890. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24891. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24892. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  24893. __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
  24894. })
  24895. #endif
  24896. #ifdef __LITTLE_ENDIAN__
  24897. #define vst3_f32(__p0, __p1) __extension__ ({ \
  24898. float32x2x3_t __s1 = __p1; \
  24899. __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
  24900. })
  24901. #else
  24902. #define vst3_f32(__p0, __p1) __extension__ ({ \
  24903. float32x2x3_t __s1 = __p1; \
  24904. float32x2x3_t __rev1; \
  24905. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24906. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24907. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  24908. __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
  24909. })
  24910. #endif
  24911. #ifdef __LITTLE_ENDIAN__
  24912. #define vst3_f16(__p0, __p1) __extension__ ({ \
  24913. float16x4x3_t __s1 = __p1; \
  24914. __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
  24915. })
  24916. #else
  24917. #define vst3_f16(__p0, __p1) __extension__ ({ \
  24918. float16x4x3_t __s1 = __p1; \
  24919. float16x4x3_t __rev1; \
  24920. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24921. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24922. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  24923. __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
  24924. })
  24925. #endif
  24926. #ifdef __LITTLE_ENDIAN__
  24927. #define vst3_s32(__p0, __p1) __extension__ ({ \
  24928. int32x2x3_t __s1 = __p1; \
  24929. __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
  24930. })
  24931. #else
  24932. #define vst3_s32(__p0, __p1) __extension__ ({ \
  24933. int32x2x3_t __s1 = __p1; \
  24934. int32x2x3_t __rev1; \
  24935. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  24936. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  24937. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  24938. __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
  24939. })
  24940. #endif
  24941. #ifdef __LITTLE_ENDIAN__
  24942. #define vst3_s64(__p0, __p1) __extension__ ({ \
  24943. int64x1x3_t __s1 = __p1; \
  24944. __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
  24945. })
  24946. #else
  24947. #define vst3_s64(__p0, __p1) __extension__ ({ \
  24948. int64x1x3_t __s1 = __p1; \
  24949. __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
  24950. })
  24951. #endif
  24952. #ifdef __LITTLE_ENDIAN__
  24953. #define vst3_s16(__p0, __p1) __extension__ ({ \
  24954. int16x4x3_t __s1 = __p1; \
  24955. __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
  24956. })
  24957. #else
  24958. #define vst3_s16(__p0, __p1) __extension__ ({ \
  24959. int16x4x3_t __s1 = __p1; \
  24960. int16x4x3_t __rev1; \
  24961. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24962. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24963. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  24964. __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
  24965. })
  24966. #endif
  24967. #ifdef __LITTLE_ENDIAN__
  24968. #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  24969. poly8x8x3_t __s1 = __p1; \
  24970. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
  24971. })
  24972. #else
  24973. #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  24974. poly8x8x3_t __s1 = __p1; \
  24975. poly8x8x3_t __rev1; \
  24976. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  24977. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  24978. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  24979. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
  24980. })
  24981. #endif
  24982. #ifdef __LITTLE_ENDIAN__
  24983. #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  24984. poly16x4x3_t __s1 = __p1; \
  24985. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
  24986. })
  24987. #else
  24988. #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  24989. poly16x4x3_t __s1 = __p1; \
  24990. poly16x4x3_t __rev1; \
  24991. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  24992. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  24993. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  24994. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
  24995. })
  24996. #endif
  24997. #ifdef __LITTLE_ENDIAN__
  24998. #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  24999. poly16x8x3_t __s1 = __p1; \
  25000. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
  25001. })
  25002. #else
  25003. #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  25004. poly16x8x3_t __s1 = __p1; \
  25005. poly16x8x3_t __rev1; \
  25006. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25007. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25008. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25009. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
  25010. })
  25011. #endif
  25012. #ifdef __LITTLE_ENDIAN__
  25013. #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  25014. uint32x4x3_t __s1 = __p1; \
  25015. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
  25016. })
  25017. #else
  25018. #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  25019. uint32x4x3_t __s1 = __p1; \
  25020. uint32x4x3_t __rev1; \
  25021. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25022. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25023. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25024. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
  25025. })
  25026. #endif
  25027. #ifdef __LITTLE_ENDIAN__
  25028. #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  25029. uint16x8x3_t __s1 = __p1; \
  25030. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
  25031. })
  25032. #else
  25033. #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  25034. uint16x8x3_t __s1 = __p1; \
  25035. uint16x8x3_t __rev1; \
  25036. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25037. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25038. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25039. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
  25040. })
  25041. #endif
  25042. #ifdef __LITTLE_ENDIAN__
  25043. #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  25044. float32x4x3_t __s1 = __p1; \
  25045. __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
  25046. })
  25047. #else
  25048. #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  25049. float32x4x3_t __s1 = __p1; \
  25050. float32x4x3_t __rev1; \
  25051. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25052. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25053. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25054. __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
  25055. })
  25056. #endif
  25057. #ifdef __LITTLE_ENDIAN__
  25058. #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  25059. float16x8x3_t __s1 = __p1; \
  25060. __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
  25061. })
  25062. #else
  25063. #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  25064. float16x8x3_t __s1 = __p1; \
  25065. float16x8x3_t __rev1; \
  25066. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25067. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25068. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25069. __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
  25070. })
  25071. #endif
  25072. #ifdef __LITTLE_ENDIAN__
  25073. #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  25074. int32x4x3_t __s1 = __p1; \
  25075. __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
  25076. })
  25077. #else
  25078. #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  25079. int32x4x3_t __s1 = __p1; \
  25080. int32x4x3_t __rev1; \
  25081. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25082. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25083. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25084. __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
  25085. })
  25086. #endif
  25087. #ifdef __LITTLE_ENDIAN__
  25088. #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  25089. int16x8x3_t __s1 = __p1; \
  25090. __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
  25091. })
  25092. #else
  25093. #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  25094. int16x8x3_t __s1 = __p1; \
  25095. int16x8x3_t __rev1; \
  25096. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25097. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25098. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25099. __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
  25100. })
  25101. #endif
  25102. #ifdef __LITTLE_ENDIAN__
  25103. #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  25104. uint8x8x3_t __s1 = __p1; \
  25105. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
  25106. })
  25107. #else
  25108. #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  25109. uint8x8x3_t __s1 = __p1; \
  25110. uint8x8x3_t __rev1; \
  25111. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25112. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25113. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25114. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
  25115. })
  25116. #endif
  25117. #ifdef __LITTLE_ENDIAN__
  25118. #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  25119. uint32x2x3_t __s1 = __p1; \
  25120. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
  25121. })
  25122. #else
  25123. #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  25124. uint32x2x3_t __s1 = __p1; \
  25125. uint32x2x3_t __rev1; \
  25126. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25127. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25128. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25129. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
  25130. })
  25131. #endif
  25132. #ifdef __LITTLE_ENDIAN__
  25133. #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  25134. uint16x4x3_t __s1 = __p1; \
  25135. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
  25136. })
  25137. #else
  25138. #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  25139. uint16x4x3_t __s1 = __p1; \
  25140. uint16x4x3_t __rev1; \
  25141. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25142. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25143. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25144. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
  25145. })
  25146. #endif
  25147. #ifdef __LITTLE_ENDIAN__
  25148. #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  25149. int8x8x3_t __s1 = __p1; \
  25150. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
  25151. })
  25152. #else
  25153. #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  25154. int8x8x3_t __s1 = __p1; \
  25155. int8x8x3_t __rev1; \
  25156. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25157. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25158. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25159. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
  25160. })
  25161. #endif
  25162. #ifdef __LITTLE_ENDIAN__
  25163. #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  25164. float32x2x3_t __s1 = __p1; \
  25165. __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
  25166. })
  25167. #else
  25168. #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  25169. float32x2x3_t __s1 = __p1; \
  25170. float32x2x3_t __rev1; \
  25171. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25172. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25173. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25174. __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
  25175. })
  25176. #endif
  25177. #ifdef __LITTLE_ENDIAN__
  25178. #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  25179. float16x4x3_t __s1 = __p1; \
  25180. __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
  25181. })
  25182. #else
  25183. #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  25184. float16x4x3_t __s1 = __p1; \
  25185. float16x4x3_t __rev1; \
  25186. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25187. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25188. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25189. __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
  25190. })
  25191. #endif
  25192. #ifdef __LITTLE_ENDIAN__
  25193. #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  25194. int32x2x3_t __s1 = __p1; \
  25195. __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
  25196. })
  25197. #else
  25198. #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  25199. int32x2x3_t __s1 = __p1; \
  25200. int32x2x3_t __rev1; \
  25201. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25202. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25203. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25204. __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
  25205. })
  25206. #endif
  25207. #ifdef __LITTLE_ENDIAN__
  25208. #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  25209. int16x4x3_t __s1 = __p1; \
  25210. __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
  25211. })
  25212. #else
  25213. #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  25214. int16x4x3_t __s1 = __p1; \
  25215. int16x4x3_t __rev1; \
  25216. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25217. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25218. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25219. __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
  25220. })
  25221. #endif
  25222. #ifdef __LITTLE_ENDIAN__
  25223. #define vst4_p8(__p0, __p1) __extension__ ({ \
  25224. poly8x8x4_t __s1 = __p1; \
  25225. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
  25226. })
  25227. #else
  25228. #define vst4_p8(__p0, __p1) __extension__ ({ \
  25229. poly8x8x4_t __s1 = __p1; \
  25230. poly8x8x4_t __rev1; \
  25231. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25232. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25233. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25234. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25235. __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
  25236. })
  25237. #endif
  25238. #ifdef __LITTLE_ENDIAN__
  25239. #define vst4_p16(__p0, __p1) __extension__ ({ \
  25240. poly16x4x4_t __s1 = __p1; \
  25241. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
  25242. })
  25243. #else
  25244. #define vst4_p16(__p0, __p1) __extension__ ({ \
  25245. poly16x4x4_t __s1 = __p1; \
  25246. poly16x4x4_t __rev1; \
  25247. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25248. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25249. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25250. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25251. __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
  25252. })
  25253. #endif
  25254. #ifdef __LITTLE_ENDIAN__
  25255. #define vst4q_p8(__p0, __p1) __extension__ ({ \
  25256. poly8x16x4_t __s1 = __p1; \
  25257. __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
  25258. })
  25259. #else
  25260. #define vst4q_p8(__p0, __p1) __extension__ ({ \
  25261. poly8x16x4_t __s1 = __p1; \
  25262. poly8x16x4_t __rev1; \
  25263. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25264. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25265. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25266. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25267. __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
  25268. })
  25269. #endif
  25270. #ifdef __LITTLE_ENDIAN__
  25271. #define vst4q_p16(__p0, __p1) __extension__ ({ \
  25272. poly16x8x4_t __s1 = __p1; \
  25273. __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
  25274. })
  25275. #else
  25276. #define vst4q_p16(__p0, __p1) __extension__ ({ \
  25277. poly16x8x4_t __s1 = __p1; \
  25278. poly16x8x4_t __rev1; \
  25279. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25280. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25281. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25282. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25283. __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
  25284. })
  25285. #endif
  25286. #ifdef __LITTLE_ENDIAN__
  25287. #define vst4q_u8(__p0, __p1) __extension__ ({ \
  25288. uint8x16x4_t __s1 = __p1; \
  25289. __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
  25290. })
  25291. #else
  25292. #define vst4q_u8(__p0, __p1) __extension__ ({ \
  25293. uint8x16x4_t __s1 = __p1; \
  25294. uint8x16x4_t __rev1; \
  25295. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25296. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25297. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25298. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25299. __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
  25300. })
  25301. #endif
  25302. #ifdef __LITTLE_ENDIAN__
  25303. #define vst4q_u32(__p0, __p1) __extension__ ({ \
  25304. uint32x4x4_t __s1 = __p1; \
  25305. __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
  25306. })
  25307. #else
  25308. #define vst4q_u32(__p0, __p1) __extension__ ({ \
  25309. uint32x4x4_t __s1 = __p1; \
  25310. uint32x4x4_t __rev1; \
  25311. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25312. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25313. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25314. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25315. __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
  25316. })
  25317. #endif
  25318. #ifdef __LITTLE_ENDIAN__
  25319. #define vst4q_u16(__p0, __p1) __extension__ ({ \
  25320. uint16x8x4_t __s1 = __p1; \
  25321. __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
  25322. })
  25323. #else
  25324. #define vst4q_u16(__p0, __p1) __extension__ ({ \
  25325. uint16x8x4_t __s1 = __p1; \
  25326. uint16x8x4_t __rev1; \
  25327. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25328. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25329. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25330. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25331. __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
  25332. })
  25333. #endif
  25334. #ifdef __LITTLE_ENDIAN__
  25335. #define vst4q_s8(__p0, __p1) __extension__ ({ \
  25336. int8x16x4_t __s1 = __p1; \
  25337. __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
  25338. })
  25339. #else
  25340. #define vst4q_s8(__p0, __p1) __extension__ ({ \
  25341. int8x16x4_t __s1 = __p1; \
  25342. int8x16x4_t __rev1; \
  25343. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25344. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25345. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25346. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  25347. __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
  25348. })
  25349. #endif
  25350. #ifdef __LITTLE_ENDIAN__
  25351. #define vst4q_f32(__p0, __p1) __extension__ ({ \
  25352. float32x4x4_t __s1 = __p1; \
  25353. __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
  25354. })
  25355. #else
  25356. #define vst4q_f32(__p0, __p1) __extension__ ({ \
  25357. float32x4x4_t __s1 = __p1; \
  25358. float32x4x4_t __rev1; \
  25359. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25360. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25361. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25362. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25363. __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
  25364. })
  25365. #endif
  25366. #ifdef __LITTLE_ENDIAN__
  25367. #define vst4q_f16(__p0, __p1) __extension__ ({ \
  25368. float16x8x4_t __s1 = __p1; \
  25369. __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
  25370. })
  25371. #else
  25372. #define vst4q_f16(__p0, __p1) __extension__ ({ \
  25373. float16x8x4_t __s1 = __p1; \
  25374. float16x8x4_t __rev1; \
  25375. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25376. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25377. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25378. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25379. __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
  25380. })
  25381. #endif
  25382. #ifdef __LITTLE_ENDIAN__
  25383. #define vst4q_s32(__p0, __p1) __extension__ ({ \
  25384. int32x4x4_t __s1 = __p1; \
  25385. __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
  25386. })
  25387. #else
  25388. #define vst4q_s32(__p0, __p1) __extension__ ({ \
  25389. int32x4x4_t __s1 = __p1; \
  25390. int32x4x4_t __rev1; \
  25391. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25392. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25393. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25394. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25395. __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
  25396. })
  25397. #endif
  25398. #ifdef __LITTLE_ENDIAN__
  25399. #define vst4q_s16(__p0, __p1) __extension__ ({ \
  25400. int16x8x4_t __s1 = __p1; \
  25401. __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
  25402. })
  25403. #else
  25404. #define vst4q_s16(__p0, __p1) __extension__ ({ \
  25405. int16x8x4_t __s1 = __p1; \
  25406. int16x8x4_t __rev1; \
  25407. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25408. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25409. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25410. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25411. __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
  25412. })
  25413. #endif
  25414. #ifdef __LITTLE_ENDIAN__
  25415. #define vst4_u8(__p0, __p1) __extension__ ({ \
  25416. uint8x8x4_t __s1 = __p1; \
  25417. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
  25418. })
  25419. #else
  25420. #define vst4_u8(__p0, __p1) __extension__ ({ \
  25421. uint8x8x4_t __s1 = __p1; \
  25422. uint8x8x4_t __rev1; \
  25423. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25424. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25425. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25426. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25427. __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
  25428. })
  25429. #endif
  25430. #ifdef __LITTLE_ENDIAN__
  25431. #define vst4_u32(__p0, __p1) __extension__ ({ \
  25432. uint32x2x4_t __s1 = __p1; \
  25433. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
  25434. })
  25435. #else
  25436. #define vst4_u32(__p0, __p1) __extension__ ({ \
  25437. uint32x2x4_t __s1 = __p1; \
  25438. uint32x2x4_t __rev1; \
  25439. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25440. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25441. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25442. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  25443. __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
  25444. })
  25445. #endif
  25446. #ifdef __LITTLE_ENDIAN__
  25447. #define vst4_u64(__p0, __p1) __extension__ ({ \
  25448. uint64x1x4_t __s1 = __p1; \
  25449. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
  25450. })
  25451. #else
  25452. #define vst4_u64(__p0, __p1) __extension__ ({ \
  25453. uint64x1x4_t __s1 = __p1; \
  25454. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
  25455. })
  25456. #endif
  25457. #ifdef __LITTLE_ENDIAN__
  25458. #define vst4_u16(__p0, __p1) __extension__ ({ \
  25459. uint16x4x4_t __s1 = __p1; \
  25460. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
  25461. })
  25462. #else
  25463. #define vst4_u16(__p0, __p1) __extension__ ({ \
  25464. uint16x4x4_t __s1 = __p1; \
  25465. uint16x4x4_t __rev1; \
  25466. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25467. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25468. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25469. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25470. __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
  25471. })
  25472. #endif
  25473. #ifdef __LITTLE_ENDIAN__
  25474. #define vst4_s8(__p0, __p1) __extension__ ({ \
  25475. int8x8x4_t __s1 = __p1; \
  25476. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
  25477. })
  25478. #else
  25479. #define vst4_s8(__p0, __p1) __extension__ ({ \
  25480. int8x8x4_t __s1 = __p1; \
  25481. int8x8x4_t __rev1; \
  25482. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25483. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25484. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25485. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25486. __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
  25487. })
  25488. #endif
  25489. #ifdef __LITTLE_ENDIAN__
  25490. #define vst4_f32(__p0, __p1) __extension__ ({ \
  25491. float32x2x4_t __s1 = __p1; \
  25492. __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
  25493. })
  25494. #else
  25495. #define vst4_f32(__p0, __p1) __extension__ ({ \
  25496. float32x2x4_t __s1 = __p1; \
  25497. float32x2x4_t __rev1; \
  25498. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25499. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25500. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25501. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  25502. __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
  25503. })
  25504. #endif
  25505. #ifdef __LITTLE_ENDIAN__
  25506. #define vst4_f16(__p0, __p1) __extension__ ({ \
  25507. float16x4x4_t __s1 = __p1; \
  25508. __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
  25509. })
  25510. #else
  25511. #define vst4_f16(__p0, __p1) __extension__ ({ \
  25512. float16x4x4_t __s1 = __p1; \
  25513. float16x4x4_t __rev1; \
  25514. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25515. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25516. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25517. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25518. __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
  25519. })
  25520. #endif
  25521. #ifdef __LITTLE_ENDIAN__
  25522. #define vst4_s32(__p0, __p1) __extension__ ({ \
  25523. int32x2x4_t __s1 = __p1; \
  25524. __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
  25525. })
  25526. #else
  25527. #define vst4_s32(__p0, __p1) __extension__ ({ \
  25528. int32x2x4_t __s1 = __p1; \
  25529. int32x2x4_t __rev1; \
  25530. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25531. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25532. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25533. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  25534. __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
  25535. })
  25536. #endif
  25537. #ifdef __LITTLE_ENDIAN__
  25538. #define vst4_s64(__p0, __p1) __extension__ ({ \
  25539. int64x1x4_t __s1 = __p1; \
  25540. __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
  25541. })
  25542. #else
  25543. #define vst4_s64(__p0, __p1) __extension__ ({ \
  25544. int64x1x4_t __s1 = __p1; \
  25545. __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
  25546. })
  25547. #endif
  25548. #ifdef __LITTLE_ENDIAN__
  25549. #define vst4_s16(__p0, __p1) __extension__ ({ \
  25550. int16x4x4_t __s1 = __p1; \
  25551. __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
  25552. })
  25553. #else
  25554. #define vst4_s16(__p0, __p1) __extension__ ({ \
  25555. int16x4x4_t __s1 = __p1; \
  25556. int16x4x4_t __rev1; \
  25557. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25558. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25559. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25560. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25561. __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
  25562. })
  25563. #endif
  25564. #ifdef __LITTLE_ENDIAN__
  25565. #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  25566. poly8x8x4_t __s1 = __p1; \
  25567. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
  25568. })
  25569. #else
  25570. #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  25571. poly8x8x4_t __s1 = __p1; \
  25572. poly8x8x4_t __rev1; \
  25573. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25574. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25575. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25576. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25577. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
  25578. })
  25579. #endif
  25580. #ifdef __LITTLE_ENDIAN__
  25581. #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  25582. poly16x4x4_t __s1 = __p1; \
  25583. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
  25584. })
  25585. #else
  25586. #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  25587. poly16x4x4_t __s1 = __p1; \
  25588. poly16x4x4_t __rev1; \
  25589. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25590. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25591. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25592. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25593. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
  25594. })
  25595. #endif
  25596. #ifdef __LITTLE_ENDIAN__
  25597. #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  25598. poly16x8x4_t __s1 = __p1; \
  25599. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
  25600. })
  25601. #else
  25602. #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
  25603. poly16x8x4_t __s1 = __p1; \
  25604. poly16x8x4_t __rev1; \
  25605. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25606. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25607. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25608. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25609. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
  25610. })
  25611. #endif
  25612. #ifdef __LITTLE_ENDIAN__
  25613. #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  25614. uint32x4x4_t __s1 = __p1; \
  25615. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
  25616. })
  25617. #else
  25618. #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  25619. uint32x4x4_t __s1 = __p1; \
  25620. uint32x4x4_t __rev1; \
  25621. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25622. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25623. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25624. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25625. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
  25626. })
  25627. #endif
  25628. #ifdef __LITTLE_ENDIAN__
  25629. #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  25630. uint16x8x4_t __s1 = __p1; \
  25631. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
  25632. })
  25633. #else
  25634. #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  25635. uint16x8x4_t __s1 = __p1; \
  25636. uint16x8x4_t __rev1; \
  25637. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25638. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25639. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25640. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25641. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
  25642. })
  25643. #endif
  25644. #ifdef __LITTLE_ENDIAN__
  25645. #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  25646. float32x4x4_t __s1 = __p1; \
  25647. __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
  25648. })
  25649. #else
  25650. #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  25651. float32x4x4_t __s1 = __p1; \
  25652. float32x4x4_t __rev1; \
  25653. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25654. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25655. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25656. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25657. __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
  25658. })
  25659. #endif
  25660. #ifdef __LITTLE_ENDIAN__
  25661. #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  25662. float16x8x4_t __s1 = __p1; \
  25663. __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
  25664. })
  25665. #else
  25666. #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  25667. float16x8x4_t __s1 = __p1; \
  25668. float16x8x4_t __rev1; \
  25669. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25670. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25671. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25672. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25673. __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
  25674. })
  25675. #endif
  25676. #ifdef __LITTLE_ENDIAN__
  25677. #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  25678. int32x4x4_t __s1 = __p1; \
  25679. __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
  25680. })
  25681. #else
  25682. #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  25683. int32x4x4_t __s1 = __p1; \
  25684. int32x4x4_t __rev1; \
  25685. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25686. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25687. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25688. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25689. __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
  25690. })
  25691. #endif
  25692. #ifdef __LITTLE_ENDIAN__
  25693. #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  25694. int16x8x4_t __s1 = __p1; \
  25695. __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
  25696. })
  25697. #else
  25698. #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  25699. int16x8x4_t __s1 = __p1; \
  25700. int16x8x4_t __rev1; \
  25701. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25702. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25703. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25704. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25705. __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
  25706. })
  25707. #endif
  25708. #ifdef __LITTLE_ENDIAN__
  25709. #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  25710. uint8x8x4_t __s1 = __p1; \
  25711. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
  25712. })
  25713. #else
  25714. #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  25715. uint8x8x4_t __s1 = __p1; \
  25716. uint8x8x4_t __rev1; \
  25717. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25718. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25719. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25720. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25721. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
  25722. })
  25723. #endif
  25724. #ifdef __LITTLE_ENDIAN__
  25725. #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  25726. uint32x2x4_t __s1 = __p1; \
  25727. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
  25728. })
  25729. #else
  25730. #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  25731. uint32x2x4_t __s1 = __p1; \
  25732. uint32x2x4_t __rev1; \
  25733. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25734. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25735. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25736. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  25737. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
  25738. })
  25739. #endif
  25740. #ifdef __LITTLE_ENDIAN__
  25741. #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  25742. uint16x4x4_t __s1 = __p1; \
  25743. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
  25744. })
  25745. #else
  25746. #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  25747. uint16x4x4_t __s1 = __p1; \
  25748. uint16x4x4_t __rev1; \
  25749. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25750. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25751. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25752. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25753. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
  25754. })
  25755. #endif
  25756. #ifdef __LITTLE_ENDIAN__
  25757. #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  25758. int8x8x4_t __s1 = __p1; \
  25759. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
  25760. })
  25761. #else
  25762. #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  25763. int8x8x4_t __s1 = __p1; \
  25764. int8x8x4_t __rev1; \
  25765. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  25766. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  25767. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  25768. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  25769. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
  25770. })
  25771. #endif
  25772. #ifdef __LITTLE_ENDIAN__
  25773. #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  25774. float32x2x4_t __s1 = __p1; \
  25775. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
  25776. })
  25777. #else
  25778. #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  25779. float32x2x4_t __s1 = __p1; \
  25780. float32x2x4_t __rev1; \
  25781. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25782. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25783. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25784. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  25785. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
  25786. })
  25787. #endif
  25788. #ifdef __LITTLE_ENDIAN__
  25789. #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  25790. float16x4x4_t __s1 = __p1; \
  25791. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
  25792. })
  25793. #else
  25794. #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  25795. float16x4x4_t __s1 = __p1; \
  25796. float16x4x4_t __rev1; \
  25797. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25798. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25799. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25800. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25801. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
  25802. })
  25803. #endif
  25804. #ifdef __LITTLE_ENDIAN__
  25805. #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  25806. int32x2x4_t __s1 = __p1; \
  25807. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
  25808. })
  25809. #else
  25810. #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  25811. int32x2x4_t __s1 = __p1; \
  25812. int32x2x4_t __rev1; \
  25813. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  25814. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  25815. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  25816. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  25817. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
  25818. })
  25819. #endif
  25820. #ifdef __LITTLE_ENDIAN__
  25821. #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  25822. int16x4x4_t __s1 = __p1; \
  25823. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
  25824. })
  25825. #else
  25826. #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  25827. int16x4x4_t __s1 = __p1; \
  25828. int16x4x4_t __rev1; \
  25829. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  25830. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  25831. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  25832. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  25833. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
  25834. })
  25835. #endif
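/* Note: the vst4_lane_<type> macros above store lane __p2 of each of the four
 * vectors in __p1 to __p0 as one interleaved 4-element structure; the
 * big-endian (#else) bodies lane-reverse the inputs first so that lane
 * numbering matches the little-endian view.  A minimal usage sketch follows;
 * it is illustrative only and not part of the generated header (src and dst
 * are hypothetical, suitably sized uint32_t pointers):
 *
 *   uint32x2x4_t quad = vld4_u32(src);   // de-interleaving load, 4 x 2 lanes
 *   vst4_lane_u32(dst, quad, 1);         // writes lane 1 of each of the four
 *                                        // vectors as 4 consecutive uint32_t
 */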
  25836. #ifdef __LITTLE_ENDIAN__
  25837. __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  25838. uint8x16_t __ret;
  25839. __ret = __p0 - __p1;
  25840. return __ret;
  25841. }
  25842. #else
  25843. __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  25844. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  25845. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  25846. uint8x16_t __ret;
  25847. __ret = __rev0 - __rev1;
  25848. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  25849. return __ret;
  25850. }
  25851. #endif
  25852. #ifdef __LITTLE_ENDIAN__
  25853. __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  25854. uint32x4_t __ret;
  25855. __ret = __p0 - __p1;
  25856. return __ret;
  25857. }
  25858. #else
  25859. __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  25860. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  25861. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  25862. uint32x4_t __ret;
  25863. __ret = __rev0 - __rev1;
  25864. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  25865. return __ret;
  25866. }
  25867. #endif
  25868. #ifdef __LITTLE_ENDIAN__
  25869. __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  25870. uint64x2_t __ret;
  25871. __ret = __p0 - __p1;
  25872. return __ret;
  25873. }
  25874. #else
  25875. __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  25876. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  25877. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  25878. uint64x2_t __ret;
  25879. __ret = __rev0 - __rev1;
  25880. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  25881. return __ret;
  25882. }
  25883. #endif
  25884. #ifdef __LITTLE_ENDIAN__
  25885. __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  25886. uint16x8_t __ret;
  25887. __ret = __p0 - __p1;
  25888. return __ret;
  25889. }
  25890. #else
  25891. __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  25892. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  25893. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  25894. uint16x8_t __ret;
  25895. __ret = __rev0 - __rev1;
  25896. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  25897. return __ret;
  25898. }
  25899. #endif
  25900. #ifdef __LITTLE_ENDIAN__
  25901. __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
  25902. int8x16_t __ret;
  25903. __ret = __p0 - __p1;
  25904. return __ret;
  25905. }
  25906. #else
  25907. __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
  25908. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  25909. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  25910. int8x16_t __ret;
  25911. __ret = __rev0 - __rev1;
  25912. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  25913. return __ret;
  25914. }
  25915. #endif
  25916. #ifdef __LITTLE_ENDIAN__
  25917. __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
  25918. float32x4_t __ret;
  25919. __ret = __p0 - __p1;
  25920. return __ret;
  25921. }
  25922. #else
  25923. __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
  25924. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  25925. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  25926. float32x4_t __ret;
  25927. __ret = __rev0 - __rev1;
  25928. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  25929. return __ret;
  25930. }
  25931. #endif
  25932. #ifdef __LITTLE_ENDIAN__
  25933. __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  25934. int32x4_t __ret;
  25935. __ret = __p0 - __p1;
  25936. return __ret;
  25937. }
  25938. #else
  25939. __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
  25940. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  25941. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  25942. int32x4_t __ret;
  25943. __ret = __rev0 - __rev1;
  25944. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  25945. return __ret;
  25946. }
  25947. #endif
  25948. #ifdef __LITTLE_ENDIAN__
  25949. __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
  25950. int64x2_t __ret;
  25951. __ret = __p0 - __p1;
  25952. return __ret;
  25953. }
  25954. #else
  25955. __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
  25956. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  25957. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  25958. int64x2_t __ret;
  25959. __ret = __rev0 - __rev1;
  25960. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  25961. return __ret;
  25962. }
  25963. #endif
  25964. #ifdef __LITTLE_ENDIAN__
  25965. __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  25966. int16x8_t __ret;
  25967. __ret = __p0 - __p1;
  25968. return __ret;
  25969. }
  25970. #else
  25971. __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
  25972. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  25973. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  25974. int16x8_t __ret;
  25975. __ret = __rev0 - __rev1;
  25976. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  25977. return __ret;
  25978. }
  25979. #endif
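/* vsubq_<type> is a plain lane-wise subtraction of two 128-bit (quadword)
 * vectors.  The #else branches show the big-endian pattern used throughout
 * this header: operands are lane-reversed with __builtin_shufflevector, the
 * operation runs on the reversed values, and the result is reversed back, so
 * the observable lane order is endian-independent.  Illustrative sketch (not
 * part of the header):
 *
 *   uint32x4_t a = vdupq_n_u32(10);
 *   uint32x4_t b = vdupq_n_u32(3);
 *   uint32x4_t d = vsubq_u32(a, b);      // every lane holds 7
 */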
  25980. #ifdef __LITTLE_ENDIAN__
  25981. __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
  25982. uint8x8_t __ret;
  25983. __ret = __p0 - __p1;
  25984. return __ret;
  25985. }
  25986. #else
  25987. __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
  25988. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  25989. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  25990. uint8x8_t __ret;
  25991. __ret = __rev0 - __rev1;
  25992. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  25993. return __ret;
  25994. }
  25995. #endif
  25996. #ifdef __LITTLE_ENDIAN__
  25997. __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
  25998. uint32x2_t __ret;
  25999. __ret = __p0 - __p1;
  26000. return __ret;
  26001. }
  26002. #else
  26003. __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
  26004. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26005. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26006. uint32x2_t __ret;
  26007. __ret = __rev0 - __rev1;
  26008. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26009. return __ret;
  26010. }
  26011. #endif
  26012. #ifdef __LITTLE_ENDIAN__
  26013. __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
  26014. uint64x1_t __ret;
  26015. __ret = __p0 - __p1;
  26016. return __ret;
  26017. }
  26018. #else
  26019. __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
  26020. uint64x1_t __ret;
  26021. __ret = __p0 - __p1;
  26022. return __ret;
  26023. }
  26024. #endif
  26025. #ifdef __LITTLE_ENDIAN__
  26026. __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
  26027. uint16x4_t __ret;
  26028. __ret = __p0 - __p1;
  26029. return __ret;
  26030. }
  26031. #else
  26032. __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
  26033. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26034. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26035. uint16x4_t __ret;
  26036. __ret = __rev0 - __rev1;
  26037. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  26038. return __ret;
  26039. }
  26040. #endif
  26041. #ifdef __LITTLE_ENDIAN__
  26042. __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
  26043. int8x8_t __ret;
  26044. __ret = __p0 - __p1;
  26045. return __ret;
  26046. }
  26047. #else
  26048. __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
  26049. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26050. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26051. int8x8_t __ret;
  26052. __ret = __rev0 - __rev1;
  26053. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26054. return __ret;
  26055. }
  26056. #endif
  26057. #ifdef __LITTLE_ENDIAN__
  26058. __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
  26059. float32x2_t __ret;
  26060. __ret = __p0 - __p1;
  26061. return __ret;
  26062. }
  26063. #else
  26064. __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
  26065. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26066. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26067. float32x2_t __ret;
  26068. __ret = __rev0 - __rev1;
  26069. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26070. return __ret;
  26071. }
  26072. #endif
  26073. #ifdef __LITTLE_ENDIAN__
  26074. __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
  26075. int32x2_t __ret;
  26076. __ret = __p0 - __p1;
  26077. return __ret;
  26078. }
  26079. #else
  26080. __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
  26081. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26082. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26083. int32x2_t __ret;
  26084. __ret = __rev0 - __rev1;
  26085. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26086. return __ret;
  26087. }
  26088. #endif
  26089. #ifdef __LITTLE_ENDIAN__
  26090. __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
  26091. int64x1_t __ret;
  26092. __ret = __p0 - __p1;
  26093. return __ret;
  26094. }
  26095. #else
  26096. __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
  26097. int64x1_t __ret;
  26098. __ret = __p0 - __p1;
  26099. return __ret;
  26100. }
  26101. #endif
  26102. #ifdef __LITTLE_ENDIAN__
  26103. __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
  26104. int16x4_t __ret;
  26105. __ret = __p0 - __p1;
  26106. return __ret;
  26107. }
  26108. #else
  26109. __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
  26110. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26111. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26112. int16x4_t __ret;
  26113. __ret = __rev0 - __rev1;
  26114. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  26115. return __ret;
  26116. }
  26117. #endif
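/* vsub_<type> is the 64-bit (doubleword) counterpart of vsubq_<type>.  The
 * single-lane 64-bit types (uint64x1_t, int64x1_t) need no lane reversal, so
 * their big-endian bodies are identical to the little-endian ones. */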
  26118. #ifdef __LITTLE_ENDIAN__
  26119. __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  26120. uint16x4_t __ret;
  26121. __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
  26122. return __ret;
  26123. }
  26124. #else
  26125. __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  26126. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26127. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26128. uint16x4_t __ret;
  26129. __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
  26130. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  26131. return __ret;
  26132. }
  26133. __ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
  26134. uint16x4_t __ret;
  26135. __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
  26136. return __ret;
  26137. }
  26138. #endif
  26139. #ifdef __LITTLE_ENDIAN__
  26140. __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  26141. uint32x2_t __ret;
  26142. __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
  26143. return __ret;
  26144. }
  26145. #else
  26146. __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  26147. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26148. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26149. uint32x2_t __ret;
  26150. __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
  26151. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26152. return __ret;
  26153. }
  26154. __ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
  26155. uint32x2_t __ret;
  26156. __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
  26157. return __ret;
  26158. }
  26159. #endif
  26160. #ifdef __LITTLE_ENDIAN__
  26161. __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  26162. uint8x8_t __ret;
  26163. __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
  26164. return __ret;
  26165. }
  26166. #else
  26167. __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  26168. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26169. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26170. uint8x8_t __ret;
  26171. __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
  26172. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26173. return __ret;
  26174. }
  26175. __ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
  26176. uint8x8_t __ret;
  26177. __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
  26178. return __ret;
  26179. }
  26180. #endif
  26181. #ifdef __LITTLE_ENDIAN__
  26182. __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  26183. int16x4_t __ret;
  26184. __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
  26185. return __ret;
  26186. }
  26187. #else
  26188. __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  26189. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26190. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26191. int16x4_t __ret;
  26192. __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
  26193. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  26194. return __ret;
  26195. }
  26196. __ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
  26197. int16x4_t __ret;
  26198. __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
  26199. return __ret;
  26200. }
  26201. #endif
  26202. #ifdef __LITTLE_ENDIAN__
  26203. __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  26204. int32x2_t __ret;
  26205. __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
  26206. return __ret;
  26207. }
  26208. #else
  26209. __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  26210. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26211. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26212. int32x2_t __ret;
  26213. __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
  26214. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26215. return __ret;
  26216. }
  26217. __ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
  26218. int32x2_t __ret;
  26219. __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
  26220. return __ret;
  26221. }
  26222. #endif
  26223. #ifdef __LITTLE_ENDIAN__
  26224. __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  26225. int8x8_t __ret;
  26226. __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
  26227. return __ret;
  26228. }
  26229. #else
  26230. __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  26231. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26232. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26233. int8x8_t __ret;
  26234. __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
  26235. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26236. return __ret;
  26237. }
  26238. __ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
  26239. int8x8_t __ret;
  26240. __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
  26241. return __ret;
  26242. }
  26243. #endif
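/* vsubhn_<type> ("subtract returning high narrow") subtracts two wide vectors
 * and keeps only the most significant half of each difference, halving the
 * element width.  The __noswap_ variants skip the big-endian lane reversal so
 * that other big-endian wrappers in this header can call them on values that
 * are already reversed.  Illustrative sketch (not part of the header):
 *
 *   uint32x4_t a = vdupq_n_u32(0x12345678u);
 *   uint32x4_t b = vdupq_n_u32(0x00000001u);
 *   uint16x4_t h = vsubhn_u32(a, b);     // each lane: (a - b) >> 16 = 0x1234
 */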
  26244. #ifdef __LITTLE_ENDIAN__
  26245. __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  26246. uint16x8_t __ret;
  26247. __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
  26248. return __ret;
  26249. }
  26250. #else
  26251. __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  26252. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26253. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26254. uint16x8_t __ret;
  26255. __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
  26256. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26257. return __ret;
  26258. }
  26259. #endif
  26260. #ifdef __LITTLE_ENDIAN__
  26261. __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  26262. uint64x2_t __ret;
  26263. __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
  26264. return __ret;
  26265. }
  26266. #else
  26267. __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  26268. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26269. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26270. uint64x2_t __ret;
  26271. __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
  26272. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26273. return __ret;
  26274. }
  26275. #endif
  26276. #ifdef __LITTLE_ENDIAN__
  26277. __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  26278. uint32x4_t __ret;
  26279. __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
  26280. return __ret;
  26281. }
  26282. #else
  26283. __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  26284. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26285. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26286. uint32x4_t __ret;
  26287. __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
  26288. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  26289. return __ret;
  26290. }
  26291. #endif
  26292. #ifdef __LITTLE_ENDIAN__
  26293. __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
  26294. int16x8_t __ret;
  26295. __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
  26296. return __ret;
  26297. }
  26298. #else
  26299. __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
  26300. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26301. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26302. int16x8_t __ret;
  26303. __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
  26304. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26305. return __ret;
  26306. }
  26307. #endif
  26308. #ifdef __LITTLE_ENDIAN__
  26309. __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
  26310. int64x2_t __ret;
  26311. __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
  26312. return __ret;
  26313. }
  26314. #else
  26315. __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
  26316. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26317. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26318. int64x2_t __ret;
  26319. __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
  26320. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26321. return __ret;
  26322. }
  26323. #endif
  26324. #ifdef __LITTLE_ENDIAN__
  26325. __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
  26326. int32x4_t __ret;
  26327. __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
  26328. return __ret;
  26329. }
  26330. #else
  26331. __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
  26332. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26333. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26334. int32x4_t __ret;
  26335. __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
  26336. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  26337. return __ret;
  26338. }
  26339. #endif
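/* vsubl_<type> ("subtract long") widens both 64-bit operands to double-width
 * elements with vmovl and then subtracts, so the full-width difference is
 * kept.  Illustrative sketch (not part of the header):
 *
 *   uint8x8_t  a = vdup_n_u8(1);
 *   uint8x8_t  b = vdup_n_u8(2);
 *   uint16x8_t d = vsubl_u8(a, b);       // each 16-bit lane: 1 - 2 = 0xFFFF
 */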
  26340. #ifdef __LITTLE_ENDIAN__
  26341. __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
  26342. uint16x8_t __ret;
  26343. __ret = __p0 - vmovl_u8(__p1);
  26344. return __ret;
  26345. }
  26346. #else
  26347. __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
  26348. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26349. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26350. uint16x8_t __ret;
  26351. __ret = __rev0 - __noswap_vmovl_u8(__rev1);
  26352. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26353. return __ret;
  26354. }
  26355. #endif
  26356. #ifdef __LITTLE_ENDIAN__
  26357. __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
  26358. uint64x2_t __ret;
  26359. __ret = __p0 - vmovl_u32(__p1);
  26360. return __ret;
  26361. }
  26362. #else
  26363. __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
  26364. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26365. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26366. uint64x2_t __ret;
  26367. __ret = __rev0 - __noswap_vmovl_u32(__rev1);
  26368. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26369. return __ret;
  26370. }
  26371. #endif
  26372. #ifdef __LITTLE_ENDIAN__
  26373. __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
  26374. uint32x4_t __ret;
  26375. __ret = __p0 - vmovl_u16(__p1);
  26376. return __ret;
  26377. }
  26378. #else
  26379. __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
  26380. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26381. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26382. uint32x4_t __ret;
  26383. __ret = __rev0 - __noswap_vmovl_u16(__rev1);
  26384. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  26385. return __ret;
  26386. }
  26387. #endif
  26388. #ifdef __LITTLE_ENDIAN__
  26389. __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
  26390. int16x8_t __ret;
  26391. __ret = __p0 - vmovl_s8(__p1);
  26392. return __ret;
  26393. }
  26394. #else
  26395. __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
  26396. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26397. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26398. int16x8_t __ret;
  26399. __ret = __rev0 - __noswap_vmovl_s8(__rev1);
  26400. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26401. return __ret;
  26402. }
  26403. #endif
  26404. #ifdef __LITTLE_ENDIAN__
  26405. __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
  26406. int64x2_t __ret;
  26407. __ret = __p0 - vmovl_s32(__p1);
  26408. return __ret;
  26409. }
  26410. #else
  26411. __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
  26412. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  26413. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  26414. int64x2_t __ret;
  26415. __ret = __rev0 - __noswap_vmovl_s32(__rev1);
  26416. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  26417. return __ret;
  26418. }
  26419. #endif
  26420. #ifdef __LITTLE_ENDIAN__
  26421. __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
  26422. int32x4_t __ret;
  26423. __ret = __p0 - vmovl_s16(__p1);
  26424. return __ret;
  26425. }
  26426. #else
  26427. __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
  26428. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26429. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26430. int32x4_t __ret;
  26431. __ret = __rev0 - __noswap_vmovl_s16(__rev1);
  26432. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  26433. return __ret;
  26434. }
  26435. #endif
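/* vsubw_<type> ("subtract wide") subtracts a vmovl-widened narrow vector
 * (__p1) from an already-wide vector (__p0), e.g. uint16x8_t - uint8x8_t ->
 * uint16x8_t.  Illustrative sketch (not part of the header):
 *
 *   uint16x8_t acc = vdupq_n_u16(100);
 *   uint8x8_t  dec = vdup_n_u8(1);
 *   acc = vsubw_u8(acc, dec);            // each lane: 100 - 1 = 99
 */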
  26436. #ifdef __LITTLE_ENDIAN__
  26437. __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
  26438. poly8x8_t __ret;
  26439. __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
  26440. return __ret;
  26441. }
  26442. #else
  26443. __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
  26444. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26445. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26446. poly8x8_t __ret;
  26447. __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
  26448. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26449. return __ret;
  26450. }
  26451. #endif
  26452. #ifdef __LITTLE_ENDIAN__
  26453. __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
  26454. uint8x8_t __ret;
  26455. __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  26456. return __ret;
  26457. }
  26458. #else
  26459. __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
  26460. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26461. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26462. uint8x8_t __ret;
  26463. __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  26464. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26465. return __ret;
  26466. }
  26467. #endif
  26468. #ifdef __LITTLE_ENDIAN__
  26469. __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
  26470. int8x8_t __ret;
  26471. __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  26472. return __ret;
  26473. }
  26474. #else
  26475. __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
  26476. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26477. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26478. int8x8_t __ret;
  26479. __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  26480. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26481. return __ret;
  26482. }
  26483. #endif
  26484. #ifdef __LITTLE_ENDIAN__
  26485. __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
  26486. poly8x8_t __ret;
  26487. __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
  26488. return __ret;
  26489. }
  26490. #else
  26491. __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
  26492. poly8x8x2_t __rev0;
  26493. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26494. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26495. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26496. poly8x8_t __ret;
  26497. __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
  26498. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26499. return __ret;
  26500. }
  26501. #endif
  26502. #ifdef __LITTLE_ENDIAN__
  26503. __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
  26504. uint8x8_t __ret;
  26505. __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
  26506. return __ret;
  26507. }
  26508. #else
  26509. __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
  26510. uint8x8x2_t __rev0;
  26511. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26512. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26513. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26514. uint8x8_t __ret;
  26515. __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
  26516. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26517. return __ret;
  26518. }
  26519. #endif
  26520. #ifdef __LITTLE_ENDIAN__
  26521. __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
  26522. int8x8_t __ret;
  26523. __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
  26524. return __ret;
  26525. }
  26526. #else
  26527. __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
  26528. int8x8x2_t __rev0;
  26529. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26530. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26531. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26532. int8x8_t __ret;
  26533. __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
  26534. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26535. return __ret;
  26536. }
  26537. #endif
  26538. #ifdef __LITTLE_ENDIAN__
  26539. __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
  26540. poly8x8_t __ret;
  26541. __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
  26542. return __ret;
  26543. }
  26544. #else
  26545. __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
  26546. poly8x8x3_t __rev0;
  26547. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26548. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26549. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26550. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26551. poly8x8_t __ret;
  26552. __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
  26553. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26554. return __ret;
  26555. }
  26556. #endif
  26557. #ifdef __LITTLE_ENDIAN__
  26558. __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
  26559. uint8x8_t __ret;
  26560. __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
  26561. return __ret;
  26562. }
  26563. #else
  26564. __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
  26565. uint8x8x3_t __rev0;
  26566. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26567. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26568. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26569. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26570. uint8x8_t __ret;
  26571. __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
  26572. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26573. return __ret;
  26574. }
  26575. #endif
  26576. #ifdef __LITTLE_ENDIAN__
  26577. __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
  26578. int8x8_t __ret;
  26579. __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
  26580. return __ret;
  26581. }
  26582. #else
  26583. __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
  26584. int8x8x3_t __rev0;
  26585. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26586. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26587. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26588. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26589. int8x8_t __ret;
  26590. __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
  26591. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26592. return __ret;
  26593. }
  26594. #endif
  26595. #ifdef __LITTLE_ENDIAN__
  26596. __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
  26597. poly8x8_t __ret;
  26598. __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
  26599. return __ret;
  26600. }
  26601. #else
  26602. __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
  26603. poly8x8x4_t __rev0;
  26604. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26605. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26606. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26607. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
  26608. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26609. poly8x8_t __ret;
  26610. __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
  26611. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26612. return __ret;
  26613. }
  26614. #endif
  26615. #ifdef __LITTLE_ENDIAN__
  26616. __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
  26617. uint8x8_t __ret;
  26618. __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
  26619. return __ret;
  26620. }
  26621. #else
  26622. __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
  26623. uint8x8x4_t __rev0;
  26624. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26625. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26626. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26627. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
  26628. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26629. uint8x8_t __ret;
  26630. __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
  26631. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26632. return __ret;
  26633. }
  26634. #endif
  26635. #ifdef __LITTLE_ENDIAN__
  26636. __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
  26637. int8x8_t __ret;
  26638. __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
  26639. return __ret;
  26640. }
  26641. #else
  26642. __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
  26643. int8x8x4_t __rev0;
  26644. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26645. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26646. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26647. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
  26648. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26649. int8x8_t __ret;
  26650. __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
  26651. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26652. return __ret;
  26653. }
  26654. #endif
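/* vtbl1..vtbl4 perform a byte-wise table lookup: the table is the 8, 16, 24
 * or 32 bytes in __p0, the per-lane indices come from __p1, and any index
 * outside the table produces 0 in that lane.  Illustrative sketch (not part
 * of the header):
 *
 *   uint8x8_t table = vcreate_u8(0x0706050403020100ULL);  // lanes {0..7}
 *   uint8x8_t idx   = vcreate_u8(0xFF00010203040506ULL);
 *   uint8x8_t out   = vtbl1_u8(table, idx);
 *   // out = {6,5,4,3,2,1,0,0}: lane 7's index (0xFF) is out of range -> 0
 */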
  26655. #ifdef __LITTLE_ENDIAN__
  26656. __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
  26657. poly8x8_t __ret;
  26658. __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
  26659. return __ret;
  26660. }
  26661. #else
  26662. __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
  26663. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26664. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26665. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26666. poly8x8_t __ret;
  26667. __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
  26668. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26669. return __ret;
  26670. }
  26671. #endif
  26672. #ifdef __LITTLE_ENDIAN__
  26673. __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  26674. uint8x8_t __ret;
  26675. __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
  26676. return __ret;
  26677. }
  26678. #else
  26679. __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  26680. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26681. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26682. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26683. uint8x8_t __ret;
  26684. __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
  26685. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26686. return __ret;
  26687. }
  26688. #endif
  26689. #ifdef __LITTLE_ENDIAN__
  26690. __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  26691. int8x8_t __ret;
  26692. __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
  26693. return __ret;
  26694. }
  26695. #else
  26696. __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  26697. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26698. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26699. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26700. int8x8_t __ret;
  26701. __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
  26702. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26703. return __ret;
  26704. }
  26705. #endif
  26706. #ifdef __LITTLE_ENDIAN__
  26707. __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
  26708. poly8x8_t __ret;
  26709. __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
  26710. return __ret;
  26711. }
  26712. #else
  26713. __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
  26714. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26715. poly8x8x2_t __rev1;
  26716. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26717. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26718. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26719. poly8x8_t __ret;
  26720. __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
  26721. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26722. return __ret;
  26723. }
  26724. #endif
  26725. #ifdef __LITTLE_ENDIAN__
  26726. __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
  26727. uint8x8_t __ret;
  26728. __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
  26729. return __ret;
  26730. }
  26731. #else
  26732. __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
  26733. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26734. uint8x8x2_t __rev1;
  26735. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26736. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26737. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26738. uint8x8_t __ret;
  26739. __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
  26740. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26741. return __ret;
  26742. }
  26743. #endif
  26744. #ifdef __LITTLE_ENDIAN__
  26745. __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
  26746. int8x8_t __ret;
  26747. __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
  26748. return __ret;
  26749. }
  26750. #else
  26751. __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
  26752. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26753. int8x8x2_t __rev1;
  26754. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26755. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26756. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26757. int8x8_t __ret;
  26758. __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
  26759. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26760. return __ret;
  26761. }
  26762. #endif
  26763. #ifdef __LITTLE_ENDIAN__
  26764. __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
  26765. poly8x8_t __ret;
  26766. __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
  26767. return __ret;
  26768. }
  26769. #else
  26770. __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
  26771. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26772. poly8x8x3_t __rev1;
  26773. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26774. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26775. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26776. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26777. poly8x8_t __ret;
  26778. __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
  26779. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26780. return __ret;
  26781. }
  26782. #endif
  26783. #ifdef __LITTLE_ENDIAN__
  26784. __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
  26785. uint8x8_t __ret;
  26786. __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
  26787. return __ret;
  26788. }
  26789. #else
  26790. __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
  26791. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26792. uint8x8x3_t __rev1;
  26793. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26794. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26795. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26796. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26797. uint8x8_t __ret;
  26798. __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
  26799. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26800. return __ret;
  26801. }
  26802. #endif
  26803. #ifdef __LITTLE_ENDIAN__
  26804. __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
  26805. int8x8_t __ret;
  26806. __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
  26807. return __ret;
  26808. }
  26809. #else
  26810. __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
  26811. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26812. int8x8x3_t __rev1;
  26813. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26814. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26815. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26816. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26817. int8x8_t __ret;
  26818. __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
  26819. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26820. return __ret;
  26821. }
  26822. #endif
  26823. #ifdef __LITTLE_ENDIAN__
  26824. __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
  26825. poly8x8_t __ret;
  26826. __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
  26827. return __ret;
  26828. }
  26829. #else
  26830. __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
  26831. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26832. poly8x8x4_t __rev1;
  26833. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26834. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26835. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26836. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
  26837. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26838. poly8x8_t __ret;
  26839. __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
  26840. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26841. return __ret;
  26842. }
  26843. #endif
  26844. #ifdef __LITTLE_ENDIAN__
  26845. __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
  26846. uint8x8_t __ret;
  26847. __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
  26848. return __ret;
  26849. }
  26850. #else
  26851. __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
  26852. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26853. uint8x8x4_t __rev1;
  26854. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26855. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26856. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26857. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
  26858. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26859. uint8x8_t __ret;
  26860. __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
  26861. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26862. return __ret;
  26863. }
  26864. #endif
  26865. #ifdef __LITTLE_ENDIAN__
  26866. __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
  26867. int8x8_t __ret;
  26868. __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
  26869. return __ret;
  26870. }
  26871. #else
  26872. __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
  26873. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26874. int8x8x4_t __rev1;
  26875. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26876. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26877. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
  26878. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
  26879. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  26880. int8x8_t __ret;
  26881. __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
  26882. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  26883. return __ret;
  26884. }
  26885. #endif
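/* vtbx1..vtbx4 are the "extension" forms of the table lookups above: in-range
 * indices are replaced by the looked-up byte, while out-of-range indices
 * leave the corresponding lane of the first argument (__p0) unchanged, where
 * vtbl would write 0.  Illustrative sketch (not part of the header):
 *
 *   uint8x8_t fallback = vdup_n_u8(0xAA);
 *   uint8x8_t table    = vcreate_u8(0x0706050403020100ULL);
 *   uint8x8_t idx      = vcreate_u8(0xFF00010203040506ULL);
 *   uint8x8_t out      = vtbx1_u8(fallback, table, idx);
 *   // lanes 0..6 come from the table; lane 7 keeps 0xAA
 */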
  26886. #ifdef __LITTLE_ENDIAN__
  26887. __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
  26888. poly8x8x2_t __ret;
  26889. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
  26890. return __ret;
  26891. }
  26892. #else
  26893. __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
  26894. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26895. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26896. poly8x8x2_t __ret;
  26897. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
  26898. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26899. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26900. return __ret;
  26901. }
  26902. #endif
  26903. #ifdef __LITTLE_ENDIAN__
  26904. __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
  26905. poly16x4x2_t __ret;
  26906. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
  26907. return __ret;
  26908. }
  26909. #else
  26910. __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
  26911. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26912. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26913. poly16x4x2_t __ret;
  26914. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
  26915. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  26916. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  26917. return __ret;
  26918. }
  26919. #endif
  26920. #ifdef __LITTLE_ENDIAN__
  26921. __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  26922. poly8x16x2_t __ret;
  26923. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
  26924. return __ret;
  26925. }
  26926. #else
  26927. __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  26928. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  26929. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  26930. poly8x16x2_t __ret;
  26931. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
  26932. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  26933. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  26934. return __ret;
  26935. }
  26936. #endif
  26937. #ifdef __LITTLE_ENDIAN__
  26938. __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
  26939. poly16x8x2_t __ret;
  26940. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
  26941. return __ret;
  26942. }
  26943. #else
  26944. __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
  26945. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26946. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26947. poly16x8x2_t __ret;
  26948. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
  26949. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  26950. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  26951. return __ret;
  26952. }
  26953. #endif
  26954. #ifdef __LITTLE_ENDIAN__
  26955. __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  26956. uint8x16x2_t __ret;
  26957. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
  26958. return __ret;
  26959. }
  26960. #else
  26961. __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  26962. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  26963. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  26964. uint8x16x2_t __ret;
  26965. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  26966. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  26967. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  26968. return __ret;
  26969. }
  26970. #endif
  26971. #ifdef __LITTLE_ENDIAN__
  26972. __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  26973. uint32x4x2_t __ret;
  26974. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
  26975. return __ret;
  26976. }
  26977. #else
  26978. __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  26979. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  26980. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  26981. uint32x4x2_t __ret;
  26982. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  26983. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  26984. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  26985. return __ret;
  26986. }
  26987. #endif
  26988. #ifdef __LITTLE_ENDIAN__
  26989. __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  26990. uint16x8x2_t __ret;
  26991. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
  26992. return __ret;
  26993. }
  26994. #else
  26995. __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  26996. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  26997. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  26998. uint16x8x2_t __ret;
  26999. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  27000. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27001. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27002. return __ret;
  27003. }
  27004. #endif
  27005. #ifdef __LITTLE_ENDIAN__
  27006. __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
  27007. int8x16x2_t __ret;
  27008. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
  27009. return __ret;
  27010. }
  27011. #else
  27012. __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
  27013. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27014. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27015. int8x16x2_t __ret;
  27016. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  27017. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27018. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27019. return __ret;
  27020. }
  27021. #endif
  27022. #ifdef __LITTLE_ENDIAN__
  27023. __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
  27024. float32x4x2_t __ret;
  27025. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
  27026. return __ret;
  27027. }
  27028. #else
  27029. __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
  27030. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27031. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27032. float32x4x2_t __ret;
  27033. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  27034. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27035. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27036. return __ret;
  27037. }
  27038. #endif
  27039. #ifdef __LITTLE_ENDIAN__
  27040. __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
  27041. int32x4x2_t __ret;
  27042. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
  27043. return __ret;
  27044. }
  27045. #else
  27046. __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
  27047. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27048. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27049. int32x4x2_t __ret;
  27050. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  27051. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27052. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27053. return __ret;
  27054. }
  27055. #endif
  27056. #ifdef __LITTLE_ENDIAN__
  27057. __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
  27058. int16x8x2_t __ret;
  27059. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
  27060. return __ret;
  27061. }
  27062. #else
  27063. __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
  27064. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27065. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27066. int16x8x2_t __ret;
  27067. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  27068. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27069. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27070. return __ret;
  27071. }
  27072. #endif
  27073. #ifdef __LITTLE_ENDIAN__
  27074. __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
  27075. uint8x8x2_t __ret;
  27076. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
  27077. return __ret;
  27078. }
  27079. #else
  27080. __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
  27081. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27082. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27083. uint8x8x2_t __ret;
  27084. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  27085. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27086. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27087. return __ret;
  27088. }
  27089. #endif
  27090. #ifdef __LITTLE_ENDIAN__
  27091. __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
  27092. uint32x2x2_t __ret;
  27093. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
  27094. return __ret;
  27095. }
  27096. #else
  27097. __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
  27098. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27099. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27100. uint32x2x2_t __ret;
  27101. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  27102. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  27103. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  27104. return __ret;
  27105. }
  27106. #endif
  27107. #ifdef __LITTLE_ENDIAN__
  27108. __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
  27109. uint16x4x2_t __ret;
  27110. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
  27111. return __ret;
  27112. }
  27113. #else
  27114. __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
  27115. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27116. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27117. uint16x4x2_t __ret;
  27118. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  27119. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27120. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27121. return __ret;
  27122. }
  27123. #endif
  27124. #ifdef __LITTLE_ENDIAN__
  27125. __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
  27126. int8x8x2_t __ret;
  27127. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
  27128. return __ret;
  27129. }
  27130. #else
  27131. __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
  27132. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27133. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27134. int8x8x2_t __ret;
  27135. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  27136. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27137. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27138. return __ret;
  27139. }
  27140. #endif
  27141. #ifdef __LITTLE_ENDIAN__
  27142. __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
  27143. float32x2x2_t __ret;
  27144. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
  27145. return __ret;
  27146. }
  27147. #else
  27148. __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
  27149. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27150. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27151. float32x2x2_t __ret;
  27152. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  27153. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  27154. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  27155. return __ret;
  27156. }
  27157. #endif
  27158. #ifdef __LITTLE_ENDIAN__
  27159. __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
  27160. int32x2x2_t __ret;
  27161. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
  27162. return __ret;
  27163. }
  27164. #else
  27165. __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
  27166. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27167. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27168. int32x2x2_t __ret;
  27169. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  27170. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  27171. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  27172. return __ret;
  27173. }
  27174. #endif
  27175. #ifdef __LITTLE_ENDIAN__
  27176. __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
  27177. int16x4x2_t __ret;
  27178. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
  27179. return __ret;
  27180. }
  27181. #else
  27182. __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
  27183. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27184. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27185. int16x4x2_t __ret;
  27186. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  27187. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27188. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27189. return __ret;
  27190. }
  27191. #endif
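/*
 * Illustrative sketch, not part of the generated intrinsics: what the
 * vtrn/vtrnq transposes above produce. Variable names are hypothetical,
 * vld1_s16 is defined elsewhere in this header, and the commented lane
 * values assume a little-endian build.
 *
 *   int16_t a_vals[4] = {0, 1, 2, 3};
 *   int16_t b_vals[4] = {4, 5, 6, 7};
 *   int16x4x2_t t = vtrn_s16(vld1_s16(a_vals), vld1_s16(b_vals));
 *   // t.val[0] holds {0, 4, 2, 6}  (even lanes of a paired with those of b)
 *   // t.val[1] holds {1, 5, 3, 7}  (odd lanes of a paired with those of b)
 */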
  27192. #ifdef __LITTLE_ENDIAN__
  27193. __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
  27194. uint8x8_t __ret;
  27195. __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  27196. return __ret;
  27197. }
  27198. #else
  27199. __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
  27200. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27201. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27202. uint8x8_t __ret;
  27203. __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  27204. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  27205. return __ret;
  27206. }
  27207. #endif
  27208. #ifdef __LITTLE_ENDIAN__
  27209. __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
  27210. uint16x4_t __ret;
  27211. __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  27212. return __ret;
  27213. }
  27214. #else
  27215. __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
  27216. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27217. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27218. uint16x4_t __ret;
  27219. __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  27220. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  27221. return __ret;
  27222. }
  27223. #endif
  27224. #ifdef __LITTLE_ENDIAN__
  27225. __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  27226. uint8x16_t __ret;
  27227. __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  27228. return __ret;
  27229. }
  27230. #else
  27231. __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  27232. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27233. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27234. uint8x16_t __ret;
  27235. __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  27236. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27237. return __ret;
  27238. }
  27239. #endif
  27240. #ifdef __LITTLE_ENDIAN__
  27241. __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
  27242. uint16x8_t __ret;
  27243. __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  27244. return __ret;
  27245. }
  27246. #else
  27247. __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
  27248. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27249. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27250. uint16x8_t __ret;
  27251. __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  27252. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  27253. return __ret;
  27254. }
  27255. #endif
  27256. #ifdef __LITTLE_ENDIAN__
  27257. __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  27258. uint8x16_t __ret;
  27259. __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  27260. return __ret;
  27261. }
  27262. #else
  27263. __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  27264. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27265. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27266. uint8x16_t __ret;
  27267. __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  27268. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27269. return __ret;
  27270. }
  27271. #endif
  27272. #ifdef __LITTLE_ENDIAN__
  27273. __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  27274. uint32x4_t __ret;
  27275. __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  27276. return __ret;
  27277. }
  27278. #else
  27279. __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  27280. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27281. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27282. uint32x4_t __ret;
  27283. __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  27284. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  27285. return __ret;
  27286. }
  27287. #endif
  27288. #ifdef __LITTLE_ENDIAN__
  27289. __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  27290. uint16x8_t __ret;
  27291. __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  27292. return __ret;
  27293. }
  27294. #else
  27295. __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  27296. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27297. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27298. uint16x8_t __ret;
  27299. __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  27300. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  27301. return __ret;
  27302. }
  27303. #endif
  27304. #ifdef __LITTLE_ENDIAN__
  27305. __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
  27306. uint8x16_t __ret;
  27307. __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  27308. return __ret;
  27309. }
  27310. #else
  27311. __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
  27312. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27313. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27314. uint8x16_t __ret;
  27315. __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  27316. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27317. return __ret;
  27318. }
  27319. #endif
  27320. #ifdef __LITTLE_ENDIAN__
  27321. __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
  27322. uint32x4_t __ret;
  27323. __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  27324. return __ret;
  27325. }
  27326. #else
  27327. __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
  27328. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27329. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27330. uint32x4_t __ret;
  27331. __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  27332. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  27333. return __ret;
  27334. }
  27335. #endif
  27336. #ifdef __LITTLE_ENDIAN__
  27337. __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
  27338. uint16x8_t __ret;
  27339. __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  27340. return __ret;
  27341. }
  27342. #else
  27343. __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
  27344. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27345. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27346. uint16x8_t __ret;
  27347. __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  27348. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  27349. return __ret;
  27350. }
  27351. #endif
  27352. #ifdef __LITTLE_ENDIAN__
  27353. __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
  27354. uint8x8_t __ret;
  27355. __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  27356. return __ret;
  27357. }
  27358. #else
  27359. __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
  27360. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27361. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27362. uint8x8_t __ret;
  27363. __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  27364. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  27365. return __ret;
  27366. }
  27367. #endif
  27368. #ifdef __LITTLE_ENDIAN__
  27369. __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
  27370. uint32x2_t __ret;
  27371. __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  27372. return __ret;
  27373. }
  27374. #else
  27375. __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
  27376. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27377. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27378. uint32x2_t __ret;
  27379. __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  27380. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  27381. return __ret;
  27382. }
  27383. #endif
  27384. #ifdef __LITTLE_ENDIAN__
  27385. __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
  27386. uint16x4_t __ret;
  27387. __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  27388. return __ret;
  27389. }
  27390. #else
  27391. __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
  27392. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27393. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27394. uint16x4_t __ret;
  27395. __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  27396. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  27397. return __ret;
  27398. }
  27399. #endif
  27400. #ifdef __LITTLE_ENDIAN__
  27401. __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
  27402. uint8x8_t __ret;
  27403. __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  27404. return __ret;
  27405. }
  27406. #else
  27407. __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
  27408. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27409. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27410. uint8x8_t __ret;
  27411. __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  27412. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  27413. return __ret;
  27414. }
  27415. #endif
  27416. #ifdef __LITTLE_ENDIAN__
  27417. __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
  27418. uint32x2_t __ret;
  27419. __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  27420. return __ret;
  27421. }
  27422. #else
  27423. __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
  27424. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27425. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27426. uint32x2_t __ret;
  27427. __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  27428. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  27429. return __ret;
  27430. }
  27431. #endif
  27432. #ifdef __LITTLE_ENDIAN__
  27433. __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
  27434. uint16x4_t __ret;
  27435. __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  27436. return __ret;
  27437. }
  27438. #else
  27439. __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
  27440. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27441. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27442. uint16x4_t __ret;
  27443. __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  27444. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  27445. return __ret;
  27446. }
  27447. #endif
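/*
 * Illustrative sketch, not part of the generated intrinsics: vtst/vtstq above
 * perform a per-lane "test bits" compare - a result lane is all ones when the
 * bitwise AND of the corresponding input lanes is non-zero, and zero
 * otherwise. Variable names are hypothetical and vld1_u8 is defined elsewhere
 * in this header.
 *
 *   uint8_t a_vals[8] = {0x01, 0x02, 0x0f, 0x00, 0x80, 0x10, 0xff, 0x08};
 *   uint8_t m_vals[8] = {0x01, 0x01, 0xf0, 0xff, 0x80, 0x01, 0x00, 0x08};
 *   uint8x8_t mask = vtst_u8(vld1_u8(a_vals), vld1_u8(m_vals));
 *   // mask holds {0xff, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0xff}
 */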
  27448. #ifdef __LITTLE_ENDIAN__
  27449. __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
  27450. poly8x8x2_t __ret;
  27451. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
  27452. return __ret;
  27453. }
  27454. #else
  27455. __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
  27456. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27457. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27458. poly8x8x2_t __ret;
  27459. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
  27460. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27461. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27462. return __ret;
  27463. }
  27464. #endif
  27465. #ifdef __LITTLE_ENDIAN__
  27466. __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
  27467. poly16x4x2_t __ret;
  27468. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
  27469. return __ret;
  27470. }
  27471. #else
  27472. __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
  27473. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27474. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27475. poly16x4x2_t __ret;
  27476. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
  27477. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27478. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27479. return __ret;
  27480. }
  27481. #endif
  27482. #ifdef __LITTLE_ENDIAN__
  27483. __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  27484. poly8x16x2_t __ret;
  27485. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
  27486. return __ret;
  27487. }
  27488. #else
  27489. __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  27490. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27491. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27492. poly8x16x2_t __ret;
  27493. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
  27494. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27495. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27496. return __ret;
  27497. }
  27498. #endif
  27499. #ifdef __LITTLE_ENDIAN__
  27500. __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
  27501. poly16x8x2_t __ret;
  27502. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
  27503. return __ret;
  27504. }
  27505. #else
  27506. __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
  27507. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27508. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27509. poly16x8x2_t __ret;
  27510. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
  27511. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27512. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27513. return __ret;
  27514. }
  27515. #endif
  27516. #ifdef __LITTLE_ENDIAN__
  27517. __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  27518. uint8x16x2_t __ret;
  27519. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
  27520. return __ret;
  27521. }
  27522. #else
  27523. __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  27524. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27525. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27526. uint8x16x2_t __ret;
  27527. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  27528. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27529. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27530. return __ret;
  27531. }
  27532. #endif
  27533. #ifdef __LITTLE_ENDIAN__
  27534. __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  27535. uint32x4x2_t __ret;
  27536. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
  27537. return __ret;
  27538. }
  27539. #else
  27540. __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  27541. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27542. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27543. uint32x4x2_t __ret;
  27544. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  27545. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27546. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27547. return __ret;
  27548. }
  27549. #endif
  27550. #ifdef __LITTLE_ENDIAN__
  27551. __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  27552. uint16x8x2_t __ret;
  27553. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
  27554. return __ret;
  27555. }
  27556. #else
  27557. __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  27558. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27559. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27560. uint16x8x2_t __ret;
  27561. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  27562. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27563. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27564. return __ret;
  27565. }
  27566. #endif
  27567. #ifdef __LITTLE_ENDIAN__
  27568. __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
  27569. int8x16x2_t __ret;
  27570. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
  27571. return __ret;
  27572. }
  27573. #else
  27574. __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
  27575. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27576. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27577. int8x16x2_t __ret;
  27578. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  27579. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27580. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27581. return __ret;
  27582. }
  27583. #endif
  27584. #ifdef __LITTLE_ENDIAN__
  27585. __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
  27586. float32x4x2_t __ret;
  27587. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
  27588. return __ret;
  27589. }
  27590. #else
  27591. __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
  27592. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27593. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27594. float32x4x2_t __ret;
  27595. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  27596. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27597. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27598. return __ret;
  27599. }
  27600. #endif
  27601. #ifdef __LITTLE_ENDIAN__
  27602. __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
  27603. int32x4x2_t __ret;
  27604. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
  27605. return __ret;
  27606. }
  27607. #else
  27608. __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
  27609. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27610. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27611. int32x4x2_t __ret;
  27612. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  27613. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27614. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27615. return __ret;
  27616. }
  27617. #endif
  27618. #ifdef __LITTLE_ENDIAN__
  27619. __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
  27620. int16x8x2_t __ret;
  27621. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
  27622. return __ret;
  27623. }
  27624. #else
  27625. __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
  27626. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27627. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27628. int16x8x2_t __ret;
  27629. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  27630. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27631. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27632. return __ret;
  27633. }
  27634. #endif
  27635. #ifdef __LITTLE_ENDIAN__
  27636. __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
  27637. uint8x8x2_t __ret;
  27638. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
  27639. return __ret;
  27640. }
  27641. #else
  27642. __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
  27643. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27644. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27645. uint8x8x2_t __ret;
  27646. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  27647. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27648. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27649. return __ret;
  27650. }
  27651. #endif
  27652. #ifdef __LITTLE_ENDIAN__
  27653. __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
  27654. uint32x2x2_t __ret;
  27655. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
  27656. return __ret;
  27657. }
  27658. #else
  27659. __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
  27660. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27661. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27662. uint32x2x2_t __ret;
  27663. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  27664. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  27665. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  27666. return __ret;
  27667. }
  27668. #endif
  27669. #ifdef __LITTLE_ENDIAN__
  27670. __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
  27671. uint16x4x2_t __ret;
  27672. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
  27673. return __ret;
  27674. }
  27675. #else
  27676. __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
  27677. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27678. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27679. uint16x4x2_t __ret;
  27680. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  27681. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27682. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27683. return __ret;
  27684. }
  27685. #endif
  27686. #ifdef __LITTLE_ENDIAN__
  27687. __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
  27688. int8x8x2_t __ret;
  27689. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
  27690. return __ret;
  27691. }
  27692. #else
  27693. __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
  27694. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27695. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27696. int8x8x2_t __ret;
  27697. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  27698. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27699. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27700. return __ret;
  27701. }
  27702. #endif
  27703. #ifdef __LITTLE_ENDIAN__
  27704. __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
  27705. float32x2x2_t __ret;
  27706. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
  27707. return __ret;
  27708. }
  27709. #else
  27710. __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
  27711. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27712. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27713. float32x2x2_t __ret;
  27714. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  27715. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  27716. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  27717. return __ret;
  27718. }
  27719. #endif
  27720. #ifdef __LITTLE_ENDIAN__
  27721. __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
  27722. int32x2x2_t __ret;
  27723. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
  27724. return __ret;
  27725. }
  27726. #else
  27727. __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
  27728. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27729. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27730. int32x2x2_t __ret;
  27731. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  27732. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  27733. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  27734. return __ret;
  27735. }
  27736. #endif
  27737. #ifdef __LITTLE_ENDIAN__
  27738. __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
  27739. int16x4x2_t __ret;
  27740. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
  27741. return __ret;
  27742. }
  27743. #else
  27744. __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
  27745. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27746. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27747. int16x4x2_t __ret;
  27748. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  27749. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27750. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27751. return __ret;
  27752. }
  27753. #endif
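/*
 * Illustrative sketch, not part of the generated intrinsics: vuzp/vuzpq above
 * de-interleave their operands - val[0] gathers the even-indexed lanes of
 * both inputs, val[1] the odd-indexed lanes. Variable names are hypothetical,
 * vld1_s16 is defined elsewhere in this header, and the commented values
 * assume a little-endian build.
 *
 *   int16_t a_vals[4] = {0, 1, 2, 3};
 *   int16_t b_vals[4] = {4, 5, 6, 7};
 *   int16x4x2_t u = vuzp_s16(vld1_s16(a_vals), vld1_s16(b_vals));
 *   // u.val[0] holds {0, 2, 4, 6}
 *   // u.val[1] holds {1, 3, 5, 7}
 */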
  27754. #ifdef __LITTLE_ENDIAN__
  27755. __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
  27756. poly8x8x2_t __ret;
  27757. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
  27758. return __ret;
  27759. }
  27760. #else
  27761. __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
  27762. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27763. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27764. poly8x8x2_t __ret;
  27765. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
  27766. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27767. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27768. return __ret;
  27769. }
  27770. #endif
  27771. #ifdef __LITTLE_ENDIAN__
  27772. __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
  27773. poly16x4x2_t __ret;
  27774. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
  27775. return __ret;
  27776. }
  27777. #else
  27778. __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
  27779. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27780. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27781. poly16x4x2_t __ret;
  27782. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
  27783. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27784. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27785. return __ret;
  27786. }
  27787. #endif
  27788. #ifdef __LITTLE_ENDIAN__
  27789. __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  27790. poly8x16x2_t __ret;
  27791. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
  27792. return __ret;
  27793. }
  27794. #else
  27795. __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
  27796. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27797. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27798. poly8x16x2_t __ret;
  27799. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
  27800. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27801. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27802. return __ret;
  27803. }
  27804. #endif
  27805. #ifdef __LITTLE_ENDIAN__
  27806. __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
  27807. poly16x8x2_t __ret;
  27808. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
  27809. return __ret;
  27810. }
  27811. #else
  27812. __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
  27813. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27814. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27815. poly16x8x2_t __ret;
  27816. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
  27817. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27818. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27819. return __ret;
  27820. }
  27821. #endif
  27822. #ifdef __LITTLE_ENDIAN__
  27823. __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  27824. uint8x16x2_t __ret;
  27825. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
  27826. return __ret;
  27827. }
  27828. #else
  27829. __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  27830. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27831. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27832. uint8x16x2_t __ret;
  27833. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  27834. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27835. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27836. return __ret;
  27837. }
  27838. #endif
  27839. #ifdef __LITTLE_ENDIAN__
  27840. __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  27841. uint32x4x2_t __ret;
  27842. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
  27843. return __ret;
  27844. }
  27845. #else
  27846. __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  27847. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27848. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27849. uint32x4x2_t __ret;
  27850. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  27851. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27852. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27853. return __ret;
  27854. }
  27855. #endif
  27856. #ifdef __LITTLE_ENDIAN__
  27857. __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  27858. uint16x8x2_t __ret;
  27859. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
  27860. return __ret;
  27861. }
  27862. #else
  27863. __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  27864. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27865. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27866. uint16x8x2_t __ret;
  27867. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  27868. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27869. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27870. return __ret;
  27871. }
  27872. #endif
  27873. #ifdef __LITTLE_ENDIAN__
  27874. __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
  27875. int8x16x2_t __ret;
  27876. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
  27877. return __ret;
  27878. }
  27879. #else
  27880. __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
  27881. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27882. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27883. int8x16x2_t __ret;
  27884. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  27885. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27886. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  27887. return __ret;
  27888. }
  27889. #endif
  27890. #ifdef __LITTLE_ENDIAN__
  27891. __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
  27892. float32x4x2_t __ret;
  27893. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
  27894. return __ret;
  27895. }
  27896. #else
  27897. __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
  27898. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27899. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27900. float32x4x2_t __ret;
  27901. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  27902. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27903. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27904. return __ret;
  27905. }
  27906. #endif
  27907. #ifdef __LITTLE_ENDIAN__
  27908. __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
  27909. int32x4x2_t __ret;
  27910. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
  27911. return __ret;
  27912. }
  27913. #else
  27914. __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
  27915. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27916. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27917. int32x4x2_t __ret;
  27918. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  27919. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27920. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27921. return __ret;
  27922. }
  27923. #endif
  27924. #ifdef __LITTLE_ENDIAN__
  27925. __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
  27926. int16x8x2_t __ret;
  27927. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
  27928. return __ret;
  27929. }
  27930. #else
  27931. __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
  27932. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27933. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27934. int16x8x2_t __ret;
  27935. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  27936. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27937. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27938. return __ret;
  27939. }
  27940. #endif
  27941. #ifdef __LITTLE_ENDIAN__
  27942. __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
  27943. uint8x8x2_t __ret;
  27944. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
  27945. return __ret;
  27946. }
  27947. #else
  27948. __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
  27949. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  27950. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  27951. uint8x8x2_t __ret;
  27952. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  27953. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  27954. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  27955. return __ret;
  27956. }
  27957. #endif
  27958. #ifdef __LITTLE_ENDIAN__
  27959. __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
  27960. uint32x2x2_t __ret;
  27961. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
  27962. return __ret;
  27963. }
  27964. #else
  27965. __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
  27966. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  27967. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  27968. uint32x2x2_t __ret;
  27969. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  27970. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  27971. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  27972. return __ret;
  27973. }
  27974. #endif
  27975. #ifdef __LITTLE_ENDIAN__
  27976. __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
  27977. uint16x4x2_t __ret;
  27978. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
  27979. return __ret;
  27980. }
  27981. #else
  27982. __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
  27983. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  27984. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  27985. uint16x4x2_t __ret;
  27986. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  27987. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  27988. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  27989. return __ret;
  27990. }
  27991. #endif
  27992. #ifdef __LITTLE_ENDIAN__
  27993. __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
  27994. int8x8x2_t __ret;
  27995. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
  27996. return __ret;
  27997. }
  27998. #else
  27999. __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
  28000. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  28001. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  28002. int8x8x2_t __ret;
  28003. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  28004. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  28005. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  28006. return __ret;
  28007. }
  28008. #endif
  28009. #ifdef __LITTLE_ENDIAN__
  28010. __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
  28011. float32x2x2_t __ret;
  28012. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
  28013. return __ret;
  28014. }
  28015. #else
  28016. __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
  28017. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  28018. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  28019. float32x2x2_t __ret;
  28020. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  28021. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  28022. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  28023. return __ret;
  28024. }
  28025. #endif
  28026. #ifdef __LITTLE_ENDIAN__
  28027. __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
  28028. int32x2x2_t __ret;
  28029. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
  28030. return __ret;
  28031. }
  28032. #else
  28033. __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
  28034. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  28035. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  28036. int32x2x2_t __ret;
  28037. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  28038. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
  28039. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
  28040. return __ret;
  28041. }
  28042. #endif
  28043. #ifdef __LITTLE_ENDIAN__
  28044. __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
  28045. int16x4x2_t __ret;
  28046. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
  28047. return __ret;
  28048. }
  28049. #else
  28050. __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
  28051. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  28052. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  28053. int16x4x2_t __ret;
  28054. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  28055. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  28056. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  28057. return __ret;
  28058. }
  28059. #endif
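/* Editorial note (not part of the generated header): the paired definitions
 * above follow the header's usual endianness pattern -- on big-endian targets
 * the input lanes are reversed with __builtin_shufflevector, the builtin is
 * invoked, and both result vectors are reversed back, so the vzip_* intrinsics
 * observe the same architectural lane order on either endianness.
 *
 * Illustrative usage sketch (assumed caller code, kept inside a comment so the
 * header's contents are unchanged):
 *
 *   int16x4_t a = {0, 1, 2, 3};
 *   int16x4_t b = {4, 5, 6, 7};
 *   int16x4x2_t z = vzip_s16(a, b);
 *   // z.val[0] == {0, 4, 1, 5},  z.val[1] == {2, 6, 3, 7}
 */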
  28060. #if !defined(__aarch64__)
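/* Editorial note (not part of the generated header): the vreinterpret
 * definitions that follow are guarded by !defined(__aarch64__), i.e. they are
 * the 32-bit ARM (AArch32) versions and are not compiled for AArch64 targets.
 */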
  28061. #ifdef __LITTLE_ENDIAN__
  28062. __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
  28063. poly8x8_t __ret;
  28064. __ret = (poly8x8_t)(__p0);
  28065. return __ret;
  28066. }
  28067. #else
  28068. __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
  28069. poly8x8_t __ret;
  28070. __ret = (poly8x8_t)(__p0);
  28071. return __ret;
  28072. }
  28073. #endif
  28074. #ifdef __LITTLE_ENDIAN__
  28075. __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
  28076. poly8x8_t __ret;
  28077. __ret = (poly8x8_t)(__p0);
  28078. return __ret;
  28079. }
  28080. #else
  28081. __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
  28082. poly8x8_t __ret;
  28083. __ret = (poly8x8_t)(__p0);
  28084. return __ret;
  28085. }
  28086. #endif
  28087. #ifdef __LITTLE_ENDIAN__
  28088. __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
  28089. poly8x8_t __ret;
  28090. __ret = (poly8x8_t)(__p0);
  28091. return __ret;
  28092. }
  28093. #else
  28094. __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
  28095. poly8x8_t __ret;
  28096. __ret = (poly8x8_t)(__p0);
  28097. return __ret;
  28098. }
  28099. #endif
  28100. #ifdef __LITTLE_ENDIAN__
  28101. __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
  28102. poly8x8_t __ret;
  28103. __ret = (poly8x8_t)(__p0);
  28104. return __ret;
  28105. }
  28106. #else
  28107. __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
  28108. poly8x8_t __ret;
  28109. __ret = (poly8x8_t)(__p0);
  28110. return __ret;
  28111. }
  28112. #endif
  28113. #ifdef __LITTLE_ENDIAN__
  28114. __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
  28115. poly8x8_t __ret;
  28116. __ret = (poly8x8_t)(__p0);
  28117. return __ret;
  28118. }
  28119. #else
  28120. __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
  28121. poly8x8_t __ret;
  28122. __ret = (poly8x8_t)(__p0);
  28123. return __ret;
  28124. }
  28125. #endif
  28126. #ifdef __LITTLE_ENDIAN__
  28127. __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
  28128. poly8x8_t __ret;
  28129. __ret = (poly8x8_t)(__p0);
  28130. return __ret;
  28131. }
  28132. #else
  28133. __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
  28134. poly8x8_t __ret;
  28135. __ret = (poly8x8_t)(__p0);
  28136. return __ret;
  28137. }
  28138. #endif
  28139. #ifdef __LITTLE_ENDIAN__
  28140. __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
  28141. poly8x8_t __ret;
  28142. __ret = (poly8x8_t)(__p0);
  28143. return __ret;
  28144. }
  28145. #else
  28146. __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
  28147. poly8x8_t __ret;
  28148. __ret = (poly8x8_t)(__p0);
  28149. return __ret;
  28150. }
  28151. #endif
  28152. #ifdef __LITTLE_ENDIAN__
  28153. __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
  28154. poly8x8_t __ret;
  28155. __ret = (poly8x8_t)(__p0);
  28156. return __ret;
  28157. }
  28158. #else
  28159. __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
  28160. poly8x8_t __ret;
  28161. __ret = (poly8x8_t)(__p0);
  28162. return __ret;
  28163. }
  28164. #endif
  28165. #ifdef __LITTLE_ENDIAN__
  28166. __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
  28167. poly8x8_t __ret;
  28168. __ret = (poly8x8_t)(__p0);
  28169. return __ret;
  28170. }
  28171. #else
  28172. __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
  28173. poly8x8_t __ret;
  28174. __ret = (poly8x8_t)(__p0);
  28175. return __ret;
  28176. }
  28177. #endif
  28178. #ifdef __LITTLE_ENDIAN__
  28179. __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
  28180. poly8x8_t __ret;
  28181. __ret = (poly8x8_t)(__p0);
  28182. return __ret;
  28183. }
  28184. #else
  28185. __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
  28186. poly8x8_t __ret;
  28187. __ret = (poly8x8_t)(__p0);
  28188. return __ret;
  28189. }
  28190. #endif
  28191. #ifdef __LITTLE_ENDIAN__
  28192. __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
  28193. poly8x8_t __ret;
  28194. __ret = (poly8x8_t)(__p0);
  28195. return __ret;
  28196. }
  28197. #else
  28198. __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
  28199. poly8x8_t __ret;
  28200. __ret = (poly8x8_t)(__p0);
  28201. return __ret;
  28202. }
  28203. #endif
  28204. #ifdef __LITTLE_ENDIAN__
  28205. __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
  28206. poly16x4_t __ret;
  28207. __ret = (poly16x4_t)(__p0);
  28208. return __ret;
  28209. }
  28210. #else
  28211. __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
  28212. poly16x4_t __ret;
  28213. __ret = (poly16x4_t)(__p0);
  28214. return __ret;
  28215. }
  28216. #endif
  28217. #ifdef __LITTLE_ENDIAN__
  28218. __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
  28219. poly16x4_t __ret;
  28220. __ret = (poly16x4_t)(__p0);
  28221. return __ret;
  28222. }
  28223. #else
  28224. __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
  28225. poly16x4_t __ret;
  28226. __ret = (poly16x4_t)(__p0);
  28227. return __ret;
  28228. }
  28229. #endif
  28230. #ifdef __LITTLE_ENDIAN__
  28231. __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
  28232. poly16x4_t __ret;
  28233. __ret = (poly16x4_t)(__p0);
  28234. return __ret;
  28235. }
  28236. #else
  28237. __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
  28238. poly16x4_t __ret;
  28239. __ret = (poly16x4_t)(__p0);
  28240. return __ret;
  28241. }
  28242. #endif
  28243. #ifdef __LITTLE_ENDIAN__
  28244. __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
  28245. poly16x4_t __ret;
  28246. __ret = (poly16x4_t)(__p0);
  28247. return __ret;
  28248. }
  28249. #else
  28250. __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
  28251. poly16x4_t __ret;
  28252. __ret = (poly16x4_t)(__p0);
  28253. return __ret;
  28254. }
  28255. #endif
  28256. #ifdef __LITTLE_ENDIAN__
  28257. __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
  28258. poly16x4_t __ret;
  28259. __ret = (poly16x4_t)(__p0);
  28260. return __ret;
  28261. }
  28262. #else
  28263. __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
  28264. poly16x4_t __ret;
  28265. __ret = (poly16x4_t)(__p0);
  28266. return __ret;
  28267. }
  28268. #endif
  28269. #ifdef __LITTLE_ENDIAN__
  28270. __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
  28271. poly16x4_t __ret;
  28272. __ret = (poly16x4_t)(__p0);
  28273. return __ret;
  28274. }
  28275. #else
  28276. __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
  28277. poly16x4_t __ret;
  28278. __ret = (poly16x4_t)(__p0);
  28279. return __ret;
  28280. }
  28281. #endif
  28282. #ifdef __LITTLE_ENDIAN__
  28283. __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
  28284. poly16x4_t __ret;
  28285. __ret = (poly16x4_t)(__p0);
  28286. return __ret;
  28287. }
  28288. #else
  28289. __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
  28290. poly16x4_t __ret;
  28291. __ret = (poly16x4_t)(__p0);
  28292. return __ret;
  28293. }
  28294. #endif
  28295. #ifdef __LITTLE_ENDIAN__
  28296. __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
  28297. poly16x4_t __ret;
  28298. __ret = (poly16x4_t)(__p0);
  28299. return __ret;
  28300. }
  28301. #else
  28302. __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
  28303. poly16x4_t __ret;
  28304. __ret = (poly16x4_t)(__p0);
  28305. return __ret;
  28306. }
  28307. #endif
  28308. #ifdef __LITTLE_ENDIAN__
  28309. __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
  28310. poly16x4_t __ret;
  28311. __ret = (poly16x4_t)(__p0);
  28312. return __ret;
  28313. }
  28314. #else
  28315. __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
  28316. poly16x4_t __ret;
  28317. __ret = (poly16x4_t)(__p0);
  28318. return __ret;
  28319. }
  28320. #endif
  28321. #ifdef __LITTLE_ENDIAN__
  28322. __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
  28323. poly16x4_t __ret;
  28324. __ret = (poly16x4_t)(__p0);
  28325. return __ret;
  28326. }
  28327. #else
  28328. __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
  28329. poly16x4_t __ret;
  28330. __ret = (poly16x4_t)(__p0);
  28331. return __ret;
  28332. }
  28333. #endif
  28334. #ifdef __LITTLE_ENDIAN__
  28335. __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
  28336. poly16x4_t __ret;
  28337. __ret = (poly16x4_t)(__p0);
  28338. return __ret;
  28339. }
  28340. #else
  28341. __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
  28342. poly16x4_t __ret;
  28343. __ret = (poly16x4_t)(__p0);
  28344. return __ret;
  28345. }
  28346. #endif
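/* Editorial note (not part of the generated header): the vreinterpret_*
 * intrinsics above are pure bit-pattern reinterpretations implemented as plain
 * C casts between 64-bit vector types; no lanes are moved, which is why the
 * little-endian and big-endian branches are identical.
 *
 * Illustrative usage sketch (assumed caller code, kept inside a comment):
 *
 *   uint16x4_t u = vdup_n_u16(0x8005);
 *   poly16x4_t p = vreinterpret_p16_u16(u);  // same 64 bits, poly16 elements
 */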
  28347. #ifdef __LITTLE_ENDIAN__
  28348. __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
  28349. poly8x16_t __ret;
  28350. __ret = (poly8x16_t)(__p0);
  28351. return __ret;
  28352. }
  28353. #else
  28354. __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
  28355. poly8x16_t __ret;
  28356. __ret = (poly8x16_t)(__p0);
  28357. return __ret;
  28358. }
  28359. #endif
  28360. #ifdef __LITTLE_ENDIAN__
  28361. __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
  28362. poly8x16_t __ret;
  28363. __ret = (poly8x16_t)(__p0);
  28364. return __ret;
  28365. }
  28366. #else
  28367. __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
  28368. poly8x16_t __ret;
  28369. __ret = (poly8x16_t)(__p0);
  28370. return __ret;
  28371. }
  28372. #endif
  28373. #ifdef __LITTLE_ENDIAN__
  28374. __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
  28375. poly8x16_t __ret;
  28376. __ret = (poly8x16_t)(__p0);
  28377. return __ret;
  28378. }
  28379. #else
  28380. __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
  28381. poly8x16_t __ret;
  28382. __ret = (poly8x16_t)(__p0);
  28383. return __ret;
  28384. }
  28385. #endif
  28386. #ifdef __LITTLE_ENDIAN__
  28387. __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
  28388. poly8x16_t __ret;
  28389. __ret = (poly8x16_t)(__p0);
  28390. return __ret;
  28391. }
  28392. #else
  28393. __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
  28394. poly8x16_t __ret;
  28395. __ret = (poly8x16_t)(__p0);
  28396. return __ret;
  28397. }
  28398. #endif
  28399. #ifdef __LITTLE_ENDIAN__
  28400. __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
  28401. poly8x16_t __ret;
  28402. __ret = (poly8x16_t)(__p0);
  28403. return __ret;
  28404. }
  28405. #else
  28406. __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
  28407. poly8x16_t __ret;
  28408. __ret = (poly8x16_t)(__p0);
  28409. return __ret;
  28410. }
  28411. #endif
  28412. #ifdef __LITTLE_ENDIAN__
  28413. __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
  28414. poly8x16_t __ret;
  28415. __ret = (poly8x16_t)(__p0);
  28416. return __ret;
  28417. }
  28418. #else
  28419. __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
  28420. poly8x16_t __ret;
  28421. __ret = (poly8x16_t)(__p0);
  28422. return __ret;
  28423. }
  28424. #endif
  28425. #ifdef __LITTLE_ENDIAN__
  28426. __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
  28427. poly8x16_t __ret;
  28428. __ret = (poly8x16_t)(__p0);
  28429. return __ret;
  28430. }
  28431. #else
  28432. __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
  28433. poly8x16_t __ret;
  28434. __ret = (poly8x16_t)(__p0);
  28435. return __ret;
  28436. }
  28437. #endif
  28438. #ifdef __LITTLE_ENDIAN__
  28439. __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
  28440. poly8x16_t __ret;
  28441. __ret = (poly8x16_t)(__p0);
  28442. return __ret;
  28443. }
  28444. #else
  28445. __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
  28446. poly8x16_t __ret;
  28447. __ret = (poly8x16_t)(__p0);
  28448. return __ret;
  28449. }
  28450. #endif
  28451. #ifdef __LITTLE_ENDIAN__
  28452. __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
  28453. poly8x16_t __ret;
  28454. __ret = (poly8x16_t)(__p0);
  28455. return __ret;
  28456. }
  28457. #else
  28458. __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
  28459. poly8x16_t __ret;
  28460. __ret = (poly8x16_t)(__p0);
  28461. return __ret;
  28462. }
  28463. #endif
  28464. #ifdef __LITTLE_ENDIAN__
  28465. __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
  28466. poly8x16_t __ret;
  28467. __ret = (poly8x16_t)(__p0);
  28468. return __ret;
  28469. }
  28470. #else
  28471. __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
  28472. poly8x16_t __ret;
  28473. __ret = (poly8x16_t)(__p0);
  28474. return __ret;
  28475. }
  28476. #endif
  28477. #ifdef __LITTLE_ENDIAN__
  28478. __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
  28479. poly8x16_t __ret;
  28480. __ret = (poly8x16_t)(__p0);
  28481. return __ret;
  28482. }
  28483. #else
  28484. __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
  28485. poly8x16_t __ret;
  28486. __ret = (poly8x16_t)(__p0);
  28487. return __ret;
  28488. }
  28489. #endif
  28490. #ifdef __LITTLE_ENDIAN__
  28491. __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
  28492. poly16x8_t __ret;
  28493. __ret = (poly16x8_t)(__p0);
  28494. return __ret;
  28495. }
  28496. #else
  28497. __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
  28498. poly16x8_t __ret;
  28499. __ret = (poly16x8_t)(__p0);
  28500. return __ret;
  28501. }
  28502. #endif
  28503. #ifdef __LITTLE_ENDIAN__
  28504. __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
  28505. poly16x8_t __ret;
  28506. __ret = (poly16x8_t)(__p0);
  28507. return __ret;
  28508. }
  28509. #else
  28510. __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
  28511. poly16x8_t __ret;
  28512. __ret = (poly16x8_t)(__p0);
  28513. return __ret;
  28514. }
  28515. #endif
  28516. #ifdef __LITTLE_ENDIAN__
  28517. __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
  28518. poly16x8_t __ret;
  28519. __ret = (poly16x8_t)(__p0);
  28520. return __ret;
  28521. }
  28522. #else
  28523. __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
  28524. poly16x8_t __ret;
  28525. __ret = (poly16x8_t)(__p0);
  28526. return __ret;
  28527. }
  28528. #endif
  28529. #ifdef __LITTLE_ENDIAN__
  28530. __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
  28531. poly16x8_t __ret;
  28532. __ret = (poly16x8_t)(__p0);
  28533. return __ret;
  28534. }
  28535. #else
  28536. __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
  28537. poly16x8_t __ret;
  28538. __ret = (poly16x8_t)(__p0);
  28539. return __ret;
  28540. }
  28541. #endif
  28542. #ifdef __LITTLE_ENDIAN__
  28543. __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
  28544. poly16x8_t __ret;
  28545. __ret = (poly16x8_t)(__p0);
  28546. return __ret;
  28547. }
  28548. #else
  28549. __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
  28550. poly16x8_t __ret;
  28551. __ret = (poly16x8_t)(__p0);
  28552. return __ret;
  28553. }
  28554. #endif
  28555. #ifdef __LITTLE_ENDIAN__
  28556. __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
  28557. poly16x8_t __ret;
  28558. __ret = (poly16x8_t)(__p0);
  28559. return __ret;
  28560. }
  28561. #else
  28562. __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
  28563. poly16x8_t __ret;
  28564. __ret = (poly16x8_t)(__p0);
  28565. return __ret;
  28566. }
  28567. #endif
  28568. #ifdef __LITTLE_ENDIAN__
  28569. __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
  28570. poly16x8_t __ret;
  28571. __ret = (poly16x8_t)(__p0);
  28572. return __ret;
  28573. }
  28574. #else
  28575. __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
  28576. poly16x8_t __ret;
  28577. __ret = (poly16x8_t)(__p0);
  28578. return __ret;
  28579. }
  28580. #endif
  28581. #ifdef __LITTLE_ENDIAN__
  28582. __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
  28583. poly16x8_t __ret;
  28584. __ret = (poly16x8_t)(__p0);
  28585. return __ret;
  28586. }
  28587. #else
  28588. __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
  28589. poly16x8_t __ret;
  28590. __ret = (poly16x8_t)(__p0);
  28591. return __ret;
  28592. }
  28593. #endif
  28594. #ifdef __LITTLE_ENDIAN__
  28595. __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
  28596. poly16x8_t __ret;
  28597. __ret = (poly16x8_t)(__p0);
  28598. return __ret;
  28599. }
  28600. #else
  28601. __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
  28602. poly16x8_t __ret;
  28603. __ret = (poly16x8_t)(__p0);
  28604. return __ret;
  28605. }
  28606. #endif
  28607. #ifdef __LITTLE_ENDIAN__
  28608. __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
  28609. poly16x8_t __ret;
  28610. __ret = (poly16x8_t)(__p0);
  28611. return __ret;
  28612. }
  28613. #else
  28614. __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
  28615. poly16x8_t __ret;
  28616. __ret = (poly16x8_t)(__p0);
  28617. return __ret;
  28618. }
  28619. #endif
  28620. #ifdef __LITTLE_ENDIAN__
  28621. __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
  28622. poly16x8_t __ret;
  28623. __ret = (poly16x8_t)(__p0);
  28624. return __ret;
  28625. }
  28626. #else
  28627. __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
  28628. poly16x8_t __ret;
  28629. __ret = (poly16x8_t)(__p0);
  28630. return __ret;
  28631. }
  28632. #endif
  28633. #ifdef __LITTLE_ENDIAN__
  28634. __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
  28635. uint8x16_t __ret;
  28636. __ret = (uint8x16_t)(__p0);
  28637. return __ret;
  28638. }
  28639. #else
  28640. __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
  28641. uint8x16_t __ret;
  28642. __ret = (uint8x16_t)(__p0);
  28643. return __ret;
  28644. }
  28645. #endif
  28646. #ifdef __LITTLE_ENDIAN__
  28647. __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
  28648. uint8x16_t __ret;
  28649. __ret = (uint8x16_t)(__p0);
  28650. return __ret;
  28651. }
  28652. #else
  28653. __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
  28654. uint8x16_t __ret;
  28655. __ret = (uint8x16_t)(__p0);
  28656. return __ret;
  28657. }
  28658. #endif
  28659. #ifdef __LITTLE_ENDIAN__
  28660. __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
  28661. uint8x16_t __ret;
  28662. __ret = (uint8x16_t)(__p0);
  28663. return __ret;
  28664. }
  28665. #else
  28666. __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
  28667. uint8x16_t __ret;
  28668. __ret = (uint8x16_t)(__p0);
  28669. return __ret;
  28670. }
  28671. #endif
  28672. #ifdef __LITTLE_ENDIAN__
  28673. __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
  28674. uint8x16_t __ret;
  28675. __ret = (uint8x16_t)(__p0);
  28676. return __ret;
  28677. }
  28678. #else
  28679. __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
  28680. uint8x16_t __ret;
  28681. __ret = (uint8x16_t)(__p0);
  28682. return __ret;
  28683. }
  28684. #endif
  28685. #ifdef __LITTLE_ENDIAN__
  28686. __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
  28687. uint8x16_t __ret;
  28688. __ret = (uint8x16_t)(__p0);
  28689. return __ret;
  28690. }
  28691. #else
  28692. __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
  28693. uint8x16_t __ret;
  28694. __ret = (uint8x16_t)(__p0);
  28695. return __ret;
  28696. }
  28697. #endif
  28698. #ifdef __LITTLE_ENDIAN__
  28699. __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
  28700. uint8x16_t __ret;
  28701. __ret = (uint8x16_t)(__p0);
  28702. return __ret;
  28703. }
  28704. #else
  28705. __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
  28706. uint8x16_t __ret;
  28707. __ret = (uint8x16_t)(__p0);
  28708. return __ret;
  28709. }
  28710. #endif
  28711. #ifdef __LITTLE_ENDIAN__
  28712. __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
  28713. uint8x16_t __ret;
  28714. __ret = (uint8x16_t)(__p0);
  28715. return __ret;
  28716. }
  28717. #else
  28718. __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
  28719. uint8x16_t __ret;
  28720. __ret = (uint8x16_t)(__p0);
  28721. return __ret;
  28722. }
  28723. #endif
  28724. #ifdef __LITTLE_ENDIAN__
  28725. __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
  28726. uint8x16_t __ret;
  28727. __ret = (uint8x16_t)(__p0);
  28728. return __ret;
  28729. }
  28730. #else
  28731. __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
  28732. uint8x16_t __ret;
  28733. __ret = (uint8x16_t)(__p0);
  28734. return __ret;
  28735. }
  28736. #endif
  28737. #ifdef __LITTLE_ENDIAN__
  28738. __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
  28739. uint8x16_t __ret;
  28740. __ret = (uint8x16_t)(__p0);
  28741. return __ret;
  28742. }
  28743. #else
  28744. __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
  28745. uint8x16_t __ret;
  28746. __ret = (uint8x16_t)(__p0);
  28747. return __ret;
  28748. }
  28749. #endif
  28750. #ifdef __LITTLE_ENDIAN__
  28751. __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
  28752. uint8x16_t __ret;
  28753. __ret = (uint8x16_t)(__p0);
  28754. return __ret;
  28755. }
  28756. #else
  28757. __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
  28758. uint8x16_t __ret;
  28759. __ret = (uint8x16_t)(__p0);
  28760. return __ret;
  28761. }
  28762. #endif
  28763. #ifdef __LITTLE_ENDIAN__
  28764. __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
  28765. uint8x16_t __ret;
  28766. __ret = (uint8x16_t)(__p0);
  28767. return __ret;
  28768. }
  28769. #else
  28770. __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
  28771. uint8x16_t __ret;
  28772. __ret = (uint8x16_t)(__p0);
  28773. return __ret;
  28774. }
  28775. #endif
  28776. #ifdef __LITTLE_ENDIAN__
  28777. __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
  28778. uint32x4_t __ret;
  28779. __ret = (uint32x4_t)(__p0);
  28780. return __ret;
  28781. }
  28782. #else
  28783. __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
  28784. uint32x4_t __ret;
  28785. __ret = (uint32x4_t)(__p0);
  28786. return __ret;
  28787. }
  28788. #endif
  28789. #ifdef __LITTLE_ENDIAN__
  28790. __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
  28791. uint32x4_t __ret;
  28792. __ret = (uint32x4_t)(__p0);
  28793. return __ret;
  28794. }
  28795. #else
  28796. __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
  28797. uint32x4_t __ret;
  28798. __ret = (uint32x4_t)(__p0);
  28799. return __ret;
  28800. }
  28801. #endif
  28802. #ifdef __LITTLE_ENDIAN__
  28803. __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
  28804. uint32x4_t __ret;
  28805. __ret = (uint32x4_t)(__p0);
  28806. return __ret;
  28807. }
  28808. #else
  28809. __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
  28810. uint32x4_t __ret;
  28811. __ret = (uint32x4_t)(__p0);
  28812. return __ret;
  28813. }
  28814. #endif
  28815. #ifdef __LITTLE_ENDIAN__
  28816. __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
  28817. uint32x4_t __ret;
  28818. __ret = (uint32x4_t)(__p0);
  28819. return __ret;
  28820. }
  28821. #else
  28822. __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
  28823. uint32x4_t __ret;
  28824. __ret = (uint32x4_t)(__p0);
  28825. return __ret;
  28826. }
  28827. #endif
  28828. #ifdef __LITTLE_ENDIAN__
  28829. __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
  28830. uint32x4_t __ret;
  28831. __ret = (uint32x4_t)(__p0);
  28832. return __ret;
  28833. }
  28834. #else
  28835. __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
  28836. uint32x4_t __ret;
  28837. __ret = (uint32x4_t)(__p0);
  28838. return __ret;
  28839. }
  28840. #endif
  28841. #ifdef __LITTLE_ENDIAN__
  28842. __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
  28843. uint32x4_t __ret;
  28844. __ret = (uint32x4_t)(__p0);
  28845. return __ret;
  28846. }
  28847. #else
  28848. __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
  28849. uint32x4_t __ret;
  28850. __ret = (uint32x4_t)(__p0);
  28851. return __ret;
  28852. }
  28853. #endif
  28854. #ifdef __LITTLE_ENDIAN__
  28855. __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
  28856. uint32x4_t __ret;
  28857. __ret = (uint32x4_t)(__p0);
  28858. return __ret;
  28859. }
  28860. #else
  28861. __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
  28862. uint32x4_t __ret;
  28863. __ret = (uint32x4_t)(__p0);
  28864. return __ret;
  28865. }
  28866. #endif
  28867. #ifdef __LITTLE_ENDIAN__
  28868. __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
  28869. uint32x4_t __ret;
  28870. __ret = (uint32x4_t)(__p0);
  28871. return __ret;
  28872. }
  28873. #else
  28874. __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
  28875. uint32x4_t __ret;
  28876. __ret = (uint32x4_t)(__p0);
  28877. return __ret;
  28878. }
  28879. #endif
  28880. #ifdef __LITTLE_ENDIAN__
  28881. __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
  28882. uint32x4_t __ret;
  28883. __ret = (uint32x4_t)(__p0);
  28884. return __ret;
  28885. }
  28886. #else
  28887. __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
  28888. uint32x4_t __ret;
  28889. __ret = (uint32x4_t)(__p0);
  28890. return __ret;
  28891. }
  28892. #endif
  28893. #ifdef __LITTLE_ENDIAN__
  28894. __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
  28895. uint32x4_t __ret;
  28896. __ret = (uint32x4_t)(__p0);
  28897. return __ret;
  28898. }
  28899. #else
  28900. __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
  28901. uint32x4_t __ret;
  28902. __ret = (uint32x4_t)(__p0);
  28903. return __ret;
  28904. }
  28905. #endif
  28906. #ifdef __LITTLE_ENDIAN__
  28907. __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
  28908. uint32x4_t __ret;
  28909. __ret = (uint32x4_t)(__p0);
  28910. return __ret;
  28911. }
  28912. #else
  28913. __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
  28914. uint32x4_t __ret;
  28915. __ret = (uint32x4_t)(__p0);
  28916. return __ret;
  28917. }
  28918. #endif
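/* Editorial note (not part of the generated header): the vreinterpretq_*
 * variants apply the same cast-only pattern to the 128-bit (q-register)
 * types. A common use is inspecting IEEE-754 bit patterns without a value
 * conversion.
 *
 * Illustrative usage sketch (assumed caller code, kept inside a comment):
 *
 *   float32x4_t f = vdupq_n_f32(1.0f);
 *   uint32x4_t bits = vreinterpretq_u32_f32(f);  // each lane holds 0x3F800000
 */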
  28919. #ifdef __LITTLE_ENDIAN__
  28920. __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
  28921. uint64x2_t __ret;
  28922. __ret = (uint64x2_t)(__p0);
  28923. return __ret;
  28924. }
  28925. #else
  28926. __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
  28927. uint64x2_t __ret;
  28928. __ret = (uint64x2_t)(__p0);
  28929. return __ret;
  28930. }
  28931. #endif
  28932. #ifdef __LITTLE_ENDIAN__
  28933. __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
  28934. uint64x2_t __ret;
  28935. __ret = (uint64x2_t)(__p0);
  28936. return __ret;
  28937. }
  28938. #else
  28939. __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
  28940. uint64x2_t __ret;
  28941. __ret = (uint64x2_t)(__p0);
  28942. return __ret;
  28943. }
  28944. #endif
  28945. #ifdef __LITTLE_ENDIAN__
  28946. __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
  28947. uint64x2_t __ret;
  28948. __ret = (uint64x2_t)(__p0);
  28949. return __ret;
  28950. }
  28951. #else
  28952. __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
  28953. uint64x2_t __ret;
  28954. __ret = (uint64x2_t)(__p0);
  28955. return __ret;
  28956. }
  28957. #endif
  28958. #ifdef __LITTLE_ENDIAN__
  28959. __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
  28960. uint64x2_t __ret;
  28961. __ret = (uint64x2_t)(__p0);
  28962. return __ret;
  28963. }
  28964. #else
  28965. __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
  28966. uint64x2_t __ret;
  28967. __ret = (uint64x2_t)(__p0);
  28968. return __ret;
  28969. }
  28970. #endif
  28971. #ifdef __LITTLE_ENDIAN__
  28972. __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
  28973. uint64x2_t __ret;
  28974. __ret = (uint64x2_t)(__p0);
  28975. return __ret;
  28976. }
  28977. #else
  28978. __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
  28979. uint64x2_t __ret;
  28980. __ret = (uint64x2_t)(__p0);
  28981. return __ret;
  28982. }
  28983. #endif
  28984. #ifdef __LITTLE_ENDIAN__
  28985. __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
  28986. uint64x2_t __ret;
  28987. __ret = (uint64x2_t)(__p0);
  28988. return __ret;
  28989. }
  28990. #else
  28991. __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
  28992. uint64x2_t __ret;
  28993. __ret = (uint64x2_t)(__p0);
  28994. return __ret;
  28995. }
  28996. #endif
  28997. #ifdef __LITTLE_ENDIAN__
  28998. __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
  28999. uint64x2_t __ret;
  29000. __ret = (uint64x2_t)(__p0);
  29001. return __ret;
  29002. }
  29003. #else
  29004. __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
  29005. uint64x2_t __ret;
  29006. __ret = (uint64x2_t)(__p0);
  29007. return __ret;
  29008. }
  29009. #endif
  29010. #ifdef __LITTLE_ENDIAN__
  29011. __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
  29012. uint64x2_t __ret;
  29013. __ret = (uint64x2_t)(__p0);
  29014. return __ret;
  29015. }
  29016. #else
  29017. __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
  29018. uint64x2_t __ret;
  29019. __ret = (uint64x2_t)(__p0);
  29020. return __ret;
  29021. }
  29022. #endif
  29023. #ifdef __LITTLE_ENDIAN__
  29024. __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
  29025. uint64x2_t __ret;
  29026. __ret = (uint64x2_t)(__p0);
  29027. return __ret;
  29028. }
  29029. #else
  29030. __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
  29031. uint64x2_t __ret;
  29032. __ret = (uint64x2_t)(__p0);
  29033. return __ret;
  29034. }
  29035. #endif
  29036. #ifdef __LITTLE_ENDIAN__
  29037. __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
  29038. uint64x2_t __ret;
  29039. __ret = (uint64x2_t)(__p0);
  29040. return __ret;
  29041. }
  29042. #else
  29043. __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
  29044. uint64x2_t __ret;
  29045. __ret = (uint64x2_t)(__p0);
  29046. return __ret;
  29047. }
  29048. #endif
  29049. #ifdef __LITTLE_ENDIAN__
  29050. __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
  29051. uint64x2_t __ret;
  29052. __ret = (uint64x2_t)(__p0);
  29053. return __ret;
  29054. }
  29055. #else
  29056. __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
  29057. uint64x2_t __ret;
  29058. __ret = (uint64x2_t)(__p0);
  29059. return __ret;
  29060. }
  29061. #endif
  29062. #ifdef __LITTLE_ENDIAN__
  29063. __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
  29064. uint16x8_t __ret;
  29065. __ret = (uint16x8_t)(__p0);
  29066. return __ret;
  29067. }
  29068. #else
  29069. __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
  29070. uint16x8_t __ret;
  29071. __ret = (uint16x8_t)(__p0);
  29072. return __ret;
  29073. }
  29074. #endif
  29075. #ifdef __LITTLE_ENDIAN__
  29076. __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
  29077. uint16x8_t __ret;
  29078. __ret = (uint16x8_t)(__p0);
  29079. return __ret;
  29080. }
  29081. #else
  29082. __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
  29083. uint16x8_t __ret;
  29084. __ret = (uint16x8_t)(__p0);
  29085. return __ret;
  29086. }
  29087. #endif
  29088. #ifdef __LITTLE_ENDIAN__
  29089. __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
  29090. uint16x8_t __ret;
  29091. __ret = (uint16x8_t)(__p0);
  29092. return __ret;
  29093. }
  29094. #else
  29095. __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
  29096. uint16x8_t __ret;
  29097. __ret = (uint16x8_t)(__p0);
  29098. return __ret;
  29099. }
  29100. #endif
  29101. #ifdef __LITTLE_ENDIAN__
  29102. __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
  29103. uint16x8_t __ret;
  29104. __ret = (uint16x8_t)(__p0);
  29105. return __ret;
  29106. }
  29107. #else
  29108. __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
  29109. uint16x8_t __ret;
  29110. __ret = (uint16x8_t)(__p0);
  29111. return __ret;
  29112. }
  29113. #endif
  29114. #ifdef __LITTLE_ENDIAN__
  29115. __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
  29116. uint16x8_t __ret;
  29117. __ret = (uint16x8_t)(__p0);
  29118. return __ret;
  29119. }
  29120. #else
  29121. __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
  29122. uint16x8_t __ret;
  29123. __ret = (uint16x8_t)(__p0);
  29124. return __ret;
  29125. }
  29126. #endif
  29127. #ifdef __LITTLE_ENDIAN__
  29128. __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
  29129. uint16x8_t __ret;
  29130. __ret = (uint16x8_t)(__p0);
  29131. return __ret;
  29132. }
  29133. #else
  29134. __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
  29135. uint16x8_t __ret;
  29136. __ret = (uint16x8_t)(__p0);
  29137. return __ret;
  29138. }
  29139. #endif
  29140. #ifdef __LITTLE_ENDIAN__
  29141. __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
  29142. uint16x8_t __ret;
  29143. __ret = (uint16x8_t)(__p0);
  29144. return __ret;
  29145. }
  29146. #else
  29147. __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
  29148. uint16x8_t __ret;
  29149. __ret = (uint16x8_t)(__p0);
  29150. return __ret;
  29151. }
  29152. #endif
  29153. #ifdef __LITTLE_ENDIAN__
  29154. __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
  29155. uint16x8_t __ret;
  29156. __ret = (uint16x8_t)(__p0);
  29157. return __ret;
  29158. }
  29159. #else
  29160. __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
  29161. uint16x8_t __ret;
  29162. __ret = (uint16x8_t)(__p0);
  29163. return __ret;
  29164. }
  29165. #endif
  29166. #ifdef __LITTLE_ENDIAN__
  29167. __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
  29168. uint16x8_t __ret;
  29169. __ret = (uint16x8_t)(__p0);
  29170. return __ret;
  29171. }
  29172. #else
  29173. __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
  29174. uint16x8_t __ret;
  29175. __ret = (uint16x8_t)(__p0);
  29176. return __ret;
  29177. }
  29178. #endif
  29179. #ifdef __LITTLE_ENDIAN__
  29180. __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
  29181. uint16x8_t __ret;
  29182. __ret = (uint16x8_t)(__p0);
  29183. return __ret;
  29184. }
  29185. #else
  29186. __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
  29187. uint16x8_t __ret;
  29188. __ret = (uint16x8_t)(__p0);
  29189. return __ret;
  29190. }
  29191. #endif
  29192. #ifdef __LITTLE_ENDIAN__
  29193. __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
  29194. uint16x8_t __ret;
  29195. __ret = (uint16x8_t)(__p0);
  29196. return __ret;
  29197. }
  29198. #else
  29199. __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
  29200. uint16x8_t __ret;
  29201. __ret = (uint16x8_t)(__p0);
  29202. return __ret;
  29203. }
  29204. #endif
  29205. #ifdef __LITTLE_ENDIAN__
  29206. __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
  29207. int8x16_t __ret;
  29208. __ret = (int8x16_t)(__p0);
  29209. return __ret;
  29210. }
  29211. #else
  29212. __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
  29213. int8x16_t __ret;
  29214. __ret = (int8x16_t)(__p0);
  29215. return __ret;
  29216. }
  29217. #endif
  29218. #ifdef __LITTLE_ENDIAN__
  29219. __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
  29220. int8x16_t __ret;
  29221. __ret = (int8x16_t)(__p0);
  29222. return __ret;
  29223. }
  29224. #else
  29225. __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
  29226. int8x16_t __ret;
  29227. __ret = (int8x16_t)(__p0);
  29228. return __ret;
  29229. }
  29230. #endif
  29231. #ifdef __LITTLE_ENDIAN__
  29232. __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
  29233. int8x16_t __ret;
  29234. __ret = (int8x16_t)(__p0);
  29235. return __ret;
  29236. }
  29237. #else
  29238. __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
  29239. int8x16_t __ret;
  29240. __ret = (int8x16_t)(__p0);
  29241. return __ret;
  29242. }
  29243. #endif
  29244. #ifdef __LITTLE_ENDIAN__
  29245. __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
  29246. int8x16_t __ret;
  29247. __ret = (int8x16_t)(__p0);
  29248. return __ret;
  29249. }
  29250. #else
  29251. __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
  29252. int8x16_t __ret;
  29253. __ret = (int8x16_t)(__p0);
  29254. return __ret;
  29255. }
  29256. #endif
  29257. #ifdef __LITTLE_ENDIAN__
  29258. __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
  29259. int8x16_t __ret;
  29260. __ret = (int8x16_t)(__p0);
  29261. return __ret;
  29262. }
  29263. #else
  29264. __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
  29265. int8x16_t __ret;
  29266. __ret = (int8x16_t)(__p0);
  29267. return __ret;
  29268. }
  29269. #endif
  29270. #ifdef __LITTLE_ENDIAN__
  29271. __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
  29272. int8x16_t __ret;
  29273. __ret = (int8x16_t)(__p0);
  29274. return __ret;
  29275. }
  29276. #else
  29277. __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
  29278. int8x16_t __ret;
  29279. __ret = (int8x16_t)(__p0);
  29280. return __ret;
  29281. }
  29282. #endif
  29283. #ifdef __LITTLE_ENDIAN__
  29284. __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
  29285. int8x16_t __ret;
  29286. __ret = (int8x16_t)(__p0);
  29287. return __ret;
  29288. }
  29289. #else
  29290. __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
  29291. int8x16_t __ret;
  29292. __ret = (int8x16_t)(__p0);
  29293. return __ret;
  29294. }
  29295. #endif
  29296. #ifdef __LITTLE_ENDIAN__
  29297. __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
  29298. int8x16_t __ret;
  29299. __ret = (int8x16_t)(__p0);
  29300. return __ret;
  29301. }
  29302. #else
  29303. __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
  29304. int8x16_t __ret;
  29305. __ret = (int8x16_t)(__p0);
  29306. return __ret;
  29307. }
  29308. #endif
  29309. #ifdef __LITTLE_ENDIAN__
  29310. __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
  29311. int8x16_t __ret;
  29312. __ret = (int8x16_t)(__p0);
  29313. return __ret;
  29314. }
  29315. #else
  29316. __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
  29317. int8x16_t __ret;
  29318. __ret = (int8x16_t)(__p0);
  29319. return __ret;
  29320. }
  29321. #endif
  29322. #ifdef __LITTLE_ENDIAN__
  29323. __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
  29324. int8x16_t __ret;
  29325. __ret = (int8x16_t)(__p0);
  29326. return __ret;
  29327. }
  29328. #else
  29329. __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
  29330. int8x16_t __ret;
  29331. __ret = (int8x16_t)(__p0);
  29332. return __ret;
  29333. }
  29334. #endif
  29335. #ifdef __LITTLE_ENDIAN__
  29336. __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
  29337. int8x16_t __ret;
  29338. __ret = (int8x16_t)(__p0);
  29339. return __ret;
  29340. }
  29341. #else
  29342. __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
  29343. int8x16_t __ret;
  29344. __ret = (int8x16_t)(__p0);
  29345. return __ret;
  29346. }
  29347. #endif
  29348. #ifdef __LITTLE_ENDIAN__
  29349. __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
  29350. float32x4_t __ret;
  29351. __ret = (float32x4_t)(__p0);
  29352. return __ret;
  29353. }
  29354. #else
  29355. __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
  29356. float32x4_t __ret;
  29357. __ret = (float32x4_t)(__p0);
  29358. return __ret;
  29359. }
  29360. #endif
  29361. #ifdef __LITTLE_ENDIAN__
  29362. __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
  29363. float32x4_t __ret;
  29364. __ret = (float32x4_t)(__p0);
  29365. return __ret;
  29366. }
  29367. #else
  29368. __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
  29369. float32x4_t __ret;
  29370. __ret = (float32x4_t)(__p0);
  29371. return __ret;
  29372. }
  29373. #endif
  29374. #ifdef __LITTLE_ENDIAN__
  29375. __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
  29376. float32x4_t __ret;
  29377. __ret = (float32x4_t)(__p0);
  29378. return __ret;
  29379. }
  29380. #else
  29381. __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
  29382. float32x4_t __ret;
  29383. __ret = (float32x4_t)(__p0);
  29384. return __ret;
  29385. }
  29386. #endif
  29387. #ifdef __LITTLE_ENDIAN__
  29388. __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
  29389. float32x4_t __ret;
  29390. __ret = (float32x4_t)(__p0);
  29391. return __ret;
  29392. }
  29393. #else
  29394. __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
  29395. float32x4_t __ret;
  29396. __ret = (float32x4_t)(__p0);
  29397. return __ret;
  29398. }
  29399. #endif
  29400. #ifdef __LITTLE_ENDIAN__
  29401. __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
  29402. float32x4_t __ret;
  29403. __ret = (float32x4_t)(__p0);
  29404. return __ret;
  29405. }
  29406. #else
  29407. __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
  29408. float32x4_t __ret;
  29409. __ret = (float32x4_t)(__p0);
  29410. return __ret;
  29411. }
  29412. #endif
  29413. #ifdef __LITTLE_ENDIAN__
  29414. __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
  29415. float32x4_t __ret;
  29416. __ret = (float32x4_t)(__p0);
  29417. return __ret;
  29418. }
  29419. #else
  29420. __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
  29421. float32x4_t __ret;
  29422. __ret = (float32x4_t)(__p0);
  29423. return __ret;
  29424. }
  29425. #endif
  29426. #ifdef __LITTLE_ENDIAN__
  29427. __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
  29428. float32x4_t __ret;
  29429. __ret = (float32x4_t)(__p0);
  29430. return __ret;
  29431. }
  29432. #else
  29433. __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
  29434. float32x4_t __ret;
  29435. __ret = (float32x4_t)(__p0);
  29436. return __ret;
  29437. }
  29438. #endif
  29439. #ifdef __LITTLE_ENDIAN__
  29440. __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
  29441. float32x4_t __ret;
  29442. __ret = (float32x4_t)(__p0);
  29443. return __ret;
  29444. }
  29445. #else
  29446. __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
  29447. float32x4_t __ret;
  29448. __ret = (float32x4_t)(__p0);
  29449. return __ret;
  29450. }
  29451. #endif
  29452. #ifdef __LITTLE_ENDIAN__
  29453. __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
  29454. float32x4_t __ret;
  29455. __ret = (float32x4_t)(__p0);
  29456. return __ret;
  29457. }
  29458. #else
  29459. __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
  29460. float32x4_t __ret;
  29461. __ret = (float32x4_t)(__p0);
  29462. return __ret;
  29463. }
  29464. #endif
  29465. #ifdef __LITTLE_ENDIAN__
  29466. __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
  29467. float32x4_t __ret;
  29468. __ret = (float32x4_t)(__p0);
  29469. return __ret;
  29470. }
  29471. #else
  29472. __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
  29473. float32x4_t __ret;
  29474. __ret = (float32x4_t)(__p0);
  29475. return __ret;
  29476. }
  29477. #endif
  29478. #ifdef __LITTLE_ENDIAN__
  29479. __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
  29480. float32x4_t __ret;
  29481. __ret = (float32x4_t)(__p0);
  29482. return __ret;
  29483. }
  29484. #else
  29485. __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
  29486. float32x4_t __ret;
  29487. __ret = (float32x4_t)(__p0);
  29488. return __ret;
  29489. }
  29490. #endif
  29491. #ifdef __LITTLE_ENDIAN__
  29492. __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
  29493. float16x8_t __ret;
  29494. __ret = (float16x8_t)(__p0);
  29495. return __ret;
  29496. }
  29497. #else
  29498. __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
  29499. float16x8_t __ret;
  29500. __ret = (float16x8_t)(__p0);
  29501. return __ret;
  29502. }
  29503. #endif
  29504. #ifdef __LITTLE_ENDIAN__
  29505. __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
  29506. float16x8_t __ret;
  29507. __ret = (float16x8_t)(__p0);
  29508. return __ret;
  29509. }
  29510. #else
  29511. __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
  29512. float16x8_t __ret;
  29513. __ret = (float16x8_t)(__p0);
  29514. return __ret;
  29515. }
  29516. #endif
  29517. #ifdef __LITTLE_ENDIAN__
  29518. __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
  29519. float16x8_t __ret;
  29520. __ret = (float16x8_t)(__p0);
  29521. return __ret;
  29522. }
  29523. #else
  29524. __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
  29525. float16x8_t __ret;
  29526. __ret = (float16x8_t)(__p0);
  29527. return __ret;
  29528. }
  29529. #endif
  29530. #ifdef __LITTLE_ENDIAN__
  29531. __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
  29532. float16x8_t __ret;
  29533. __ret = (float16x8_t)(__p0);
  29534. return __ret;
  29535. }
  29536. #else
  29537. __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
  29538. float16x8_t __ret;
  29539. __ret = (float16x8_t)(__p0);
  29540. return __ret;
  29541. }
  29542. #endif
  29543. #ifdef __LITTLE_ENDIAN__
  29544. __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
  29545. float16x8_t __ret;
  29546. __ret = (float16x8_t)(__p0);
  29547. return __ret;
  29548. }
  29549. #else
  29550. __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
  29551. float16x8_t __ret;
  29552. __ret = (float16x8_t)(__p0);
  29553. return __ret;
  29554. }
  29555. #endif
  29556. #ifdef __LITTLE_ENDIAN__
  29557. __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
  29558. float16x8_t __ret;
  29559. __ret = (float16x8_t)(__p0);
  29560. return __ret;
  29561. }
  29562. #else
  29563. __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
  29564. float16x8_t __ret;
  29565. __ret = (float16x8_t)(__p0);
  29566. return __ret;
  29567. }
  29568. #endif
  29569. #ifdef __LITTLE_ENDIAN__
  29570. __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
  29571. float16x8_t __ret;
  29572. __ret = (float16x8_t)(__p0);
  29573. return __ret;
  29574. }
  29575. #else
  29576. __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
  29577. float16x8_t __ret;
  29578. __ret = (float16x8_t)(__p0);
  29579. return __ret;
  29580. }
  29581. #endif
  29582. #ifdef __LITTLE_ENDIAN__
  29583. __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
  29584. float16x8_t __ret;
  29585. __ret = (float16x8_t)(__p0);
  29586. return __ret;
  29587. }
  29588. #else
  29589. __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
  29590. float16x8_t __ret;
  29591. __ret = (float16x8_t)(__p0);
  29592. return __ret;
  29593. }
  29594. #endif
  29595. #ifdef __LITTLE_ENDIAN__
  29596. __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
  29597. float16x8_t __ret;
  29598. __ret = (float16x8_t)(__p0);
  29599. return __ret;
  29600. }
  29601. #else
  29602. __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
  29603. float16x8_t __ret;
  29604. __ret = (float16x8_t)(__p0);
  29605. return __ret;
  29606. }
  29607. #endif
  29608. #ifdef __LITTLE_ENDIAN__
  29609. __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
  29610. float16x8_t __ret;
  29611. __ret = (float16x8_t)(__p0);
  29612. return __ret;
  29613. }
  29614. #else
  29615. __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
  29616. float16x8_t __ret;
  29617. __ret = (float16x8_t)(__p0);
  29618. return __ret;
  29619. }
  29620. #endif
  29621. #ifdef __LITTLE_ENDIAN__
  29622. __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
  29623. float16x8_t __ret;
  29624. __ret = (float16x8_t)(__p0);
  29625. return __ret;
  29626. }
  29627. #else
  29628. __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
  29629. float16x8_t __ret;
  29630. __ret = (float16x8_t)(__p0);
  29631. return __ret;
  29632. }
  29633. #endif
  29634. #ifdef __LITTLE_ENDIAN__
  29635. __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
  29636. int32x4_t __ret;
  29637. __ret = (int32x4_t)(__p0);
  29638. return __ret;
  29639. }
  29640. #else
  29641. __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
  29642. int32x4_t __ret;
  29643. __ret = (int32x4_t)(__p0);
  29644. return __ret;
  29645. }
  29646. #endif
  29647. #ifdef __LITTLE_ENDIAN__
  29648. __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
  29649. int32x4_t __ret;
  29650. __ret = (int32x4_t)(__p0);
  29651. return __ret;
  29652. }
  29653. #else
  29654. __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
  29655. int32x4_t __ret;
  29656. __ret = (int32x4_t)(__p0);
  29657. return __ret;
  29658. }
  29659. #endif
  29660. #ifdef __LITTLE_ENDIAN__
  29661. __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
  29662. int32x4_t __ret;
  29663. __ret = (int32x4_t)(__p0);
  29664. return __ret;
  29665. }
  29666. #else
  29667. __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
  29668. int32x4_t __ret;
  29669. __ret = (int32x4_t)(__p0);
  29670. return __ret;
  29671. }
  29672. #endif
  29673. #ifdef __LITTLE_ENDIAN__
  29674. __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
  29675. int32x4_t __ret;
  29676. __ret = (int32x4_t)(__p0);
  29677. return __ret;
  29678. }
  29679. #else
  29680. __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
  29681. int32x4_t __ret;
  29682. __ret = (int32x4_t)(__p0);
  29683. return __ret;
  29684. }
  29685. #endif
  29686. #ifdef __LITTLE_ENDIAN__
  29687. __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
  29688. int32x4_t __ret;
  29689. __ret = (int32x4_t)(__p0);
  29690. return __ret;
  29691. }
  29692. #else
  29693. __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
  29694. int32x4_t __ret;
  29695. __ret = (int32x4_t)(__p0);
  29696. return __ret;
  29697. }
  29698. #endif
  29699. #ifdef __LITTLE_ENDIAN__
  29700. __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
  29701. int32x4_t __ret;
  29702. __ret = (int32x4_t)(__p0);
  29703. return __ret;
  29704. }
  29705. #else
  29706. __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
  29707. int32x4_t __ret;
  29708. __ret = (int32x4_t)(__p0);
  29709. return __ret;
  29710. }
  29711. #endif
  29712. #ifdef __LITTLE_ENDIAN__
  29713. __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
  29714. int32x4_t __ret;
  29715. __ret = (int32x4_t)(__p0);
  29716. return __ret;
  29717. }
  29718. #else
  29719. __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
  29720. int32x4_t __ret;
  29721. __ret = (int32x4_t)(__p0);
  29722. return __ret;
  29723. }
  29724. #endif
  29725. #ifdef __LITTLE_ENDIAN__
  29726. __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
  29727. int32x4_t __ret;
  29728. __ret = (int32x4_t)(__p0);
  29729. return __ret;
  29730. }
  29731. #else
  29732. __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
  29733. int32x4_t __ret;
  29734. __ret = (int32x4_t)(__p0);
  29735. return __ret;
  29736. }
  29737. #endif
  29738. #ifdef __LITTLE_ENDIAN__
  29739. __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
  29740. int32x4_t __ret;
  29741. __ret = (int32x4_t)(__p0);
  29742. return __ret;
  29743. }
  29744. #else
  29745. __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
  29746. int32x4_t __ret;
  29747. __ret = (int32x4_t)(__p0);
  29748. return __ret;
  29749. }
  29750. #endif
  29751. #ifdef __LITTLE_ENDIAN__
  29752. __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
  29753. int32x4_t __ret;
  29754. __ret = (int32x4_t)(__p0);
  29755. return __ret;
  29756. }
  29757. #else
  29758. __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
  29759. int32x4_t __ret;
  29760. __ret = (int32x4_t)(__p0);
  29761. return __ret;
  29762. }
  29763. #endif
  29764. #ifdef __LITTLE_ENDIAN__
  29765. __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
  29766. int32x4_t __ret;
  29767. __ret = (int32x4_t)(__p0);
  29768. return __ret;
  29769. }
  29770. #else
  29771. __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
  29772. int32x4_t __ret;
  29773. __ret = (int32x4_t)(__p0);
  29774. return __ret;
  29775. }
  29776. #endif
  29777. #ifdef __LITTLE_ENDIAN__
  29778. __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
  29779. int64x2_t __ret;
  29780. __ret = (int64x2_t)(__p0);
  29781. return __ret;
  29782. }
  29783. #else
  29784. __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
  29785. int64x2_t __ret;
  29786. __ret = (int64x2_t)(__p0);
  29787. return __ret;
  29788. }
  29789. #endif
  29790. #ifdef __LITTLE_ENDIAN__
  29791. __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
  29792. int64x2_t __ret;
  29793. __ret = (int64x2_t)(__p0);
  29794. return __ret;
  29795. }
  29796. #else
  29797. __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
  29798. int64x2_t __ret;
  29799. __ret = (int64x2_t)(__p0);
  29800. return __ret;
  29801. }
  29802. #endif
  29803. #ifdef __LITTLE_ENDIAN__
  29804. __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
  29805. int64x2_t __ret;
  29806. __ret = (int64x2_t)(__p0);
  29807. return __ret;
  29808. }
  29809. #else
  29810. __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
  29811. int64x2_t __ret;
  29812. __ret = (int64x2_t)(__p0);
  29813. return __ret;
  29814. }
  29815. #endif
  29816. #ifdef __LITTLE_ENDIAN__
  29817. __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
  29818. int64x2_t __ret;
  29819. __ret = (int64x2_t)(__p0);
  29820. return __ret;
  29821. }
  29822. #else
  29823. __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
  29824. int64x2_t __ret;
  29825. __ret = (int64x2_t)(__p0);
  29826. return __ret;
  29827. }
  29828. #endif
  29829. #ifdef __LITTLE_ENDIAN__
  29830. __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
  29831. int64x2_t __ret;
  29832. __ret = (int64x2_t)(__p0);
  29833. return __ret;
  29834. }
  29835. #else
  29836. __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
  29837. int64x2_t __ret;
  29838. __ret = (int64x2_t)(__p0);
  29839. return __ret;
  29840. }
  29841. #endif
  29842. #ifdef __LITTLE_ENDIAN__
  29843. __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
  29844. int64x2_t __ret;
  29845. __ret = (int64x2_t)(__p0);
  29846. return __ret;
  29847. }
  29848. #else
  29849. __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
  29850. int64x2_t __ret;
  29851. __ret = (int64x2_t)(__p0);
  29852. return __ret;
  29853. }
  29854. #endif
  29855. #ifdef __LITTLE_ENDIAN__
  29856. __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
  29857. int64x2_t __ret;
  29858. __ret = (int64x2_t)(__p0);
  29859. return __ret;
  29860. }
  29861. #else
  29862. __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
  29863. int64x2_t __ret;
  29864. __ret = (int64x2_t)(__p0);
  29865. return __ret;
  29866. }
  29867. #endif
  29868. #ifdef __LITTLE_ENDIAN__
  29869. __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
  29870. int64x2_t __ret;
  29871. __ret = (int64x2_t)(__p0);
  29872. return __ret;
  29873. }
  29874. #else
  29875. __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
  29876. int64x2_t __ret;
  29877. __ret = (int64x2_t)(__p0);
  29878. return __ret;
  29879. }
  29880. #endif
  29881. #ifdef __LITTLE_ENDIAN__
  29882. __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
  29883. int64x2_t __ret;
  29884. __ret = (int64x2_t)(__p0);
  29885. return __ret;
  29886. }
  29887. #else
  29888. __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
  29889. int64x2_t __ret;
  29890. __ret = (int64x2_t)(__p0);
  29891. return __ret;
  29892. }
  29893. #endif
  29894. #ifdef __LITTLE_ENDIAN__
  29895. __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
  29896. int64x2_t __ret;
  29897. __ret = (int64x2_t)(__p0);
  29898. return __ret;
  29899. }
  29900. #else
  29901. __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
  29902. int64x2_t __ret;
  29903. __ret = (int64x2_t)(__p0);
  29904. return __ret;
  29905. }
  29906. #endif
  29907. #ifdef __LITTLE_ENDIAN__
  29908. __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
  29909. int64x2_t __ret;
  29910. __ret = (int64x2_t)(__p0);
  29911. return __ret;
  29912. }
  29913. #else
  29914. __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
  29915. int64x2_t __ret;
  29916. __ret = (int64x2_t)(__p0);
  29917. return __ret;
  29918. }
  29919. #endif
  29920. #ifdef __LITTLE_ENDIAN__
  29921. __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
  29922. int16x8_t __ret;
  29923. __ret = (int16x8_t)(__p0);
  29924. return __ret;
  29925. }
  29926. #else
  29927. __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
  29928. int16x8_t __ret;
  29929. __ret = (int16x8_t)(__p0);
  29930. return __ret;
  29931. }
  29932. #endif
  29933. #ifdef __LITTLE_ENDIAN__
  29934. __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
  29935. int16x8_t __ret;
  29936. __ret = (int16x8_t)(__p0);
  29937. return __ret;
  29938. }
  29939. #else
  29940. __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
  29941. int16x8_t __ret;
  29942. __ret = (int16x8_t)(__p0);
  29943. return __ret;
  29944. }
  29945. #endif
  29946. #ifdef __LITTLE_ENDIAN__
  29947. __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
  29948. int16x8_t __ret;
  29949. __ret = (int16x8_t)(__p0);
  29950. return __ret;
  29951. }
  29952. #else
  29953. __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
  29954. int16x8_t __ret;
  29955. __ret = (int16x8_t)(__p0);
  29956. return __ret;
  29957. }
  29958. #endif
  29959. #ifdef __LITTLE_ENDIAN__
  29960. __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
  29961. int16x8_t __ret;
  29962. __ret = (int16x8_t)(__p0);
  29963. return __ret;
  29964. }
  29965. #else
  29966. __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
  29967. int16x8_t __ret;
  29968. __ret = (int16x8_t)(__p0);
  29969. return __ret;
  29970. }
  29971. #endif
  29972. #ifdef __LITTLE_ENDIAN__
  29973. __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
  29974. int16x8_t __ret;
  29975. __ret = (int16x8_t)(__p0);
  29976. return __ret;
  29977. }
  29978. #else
  29979. __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
  29980. int16x8_t __ret;
  29981. __ret = (int16x8_t)(__p0);
  29982. return __ret;
  29983. }
  29984. #endif
  29985. #ifdef __LITTLE_ENDIAN__
  29986. __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
  29987. int16x8_t __ret;
  29988. __ret = (int16x8_t)(__p0);
  29989. return __ret;
  29990. }
  29991. #else
  29992. __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
  29993. int16x8_t __ret;
  29994. __ret = (int16x8_t)(__p0);
  29995. return __ret;
  29996. }
  29997. #endif
  29998. #ifdef __LITTLE_ENDIAN__
  29999. __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
  30000. int16x8_t __ret;
  30001. __ret = (int16x8_t)(__p0);
  30002. return __ret;
  30003. }
  30004. #else
  30005. __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
  30006. int16x8_t __ret;
  30007. __ret = (int16x8_t)(__p0);
  30008. return __ret;
  30009. }
  30010. #endif
  30011. #ifdef __LITTLE_ENDIAN__
  30012. __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
  30013. int16x8_t __ret;
  30014. __ret = (int16x8_t)(__p0);
  30015. return __ret;
  30016. }
  30017. #else
  30018. __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
  30019. int16x8_t __ret;
  30020. __ret = (int16x8_t)(__p0);
  30021. return __ret;
  30022. }
  30023. #endif
  30024. #ifdef __LITTLE_ENDIAN__
  30025. __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
  30026. int16x8_t __ret;
  30027. __ret = (int16x8_t)(__p0);
  30028. return __ret;
  30029. }
  30030. #else
  30031. __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
  30032. int16x8_t __ret;
  30033. __ret = (int16x8_t)(__p0);
  30034. return __ret;
  30035. }
  30036. #endif
  30037. #ifdef __LITTLE_ENDIAN__
  30038. __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
  30039. int16x8_t __ret;
  30040. __ret = (int16x8_t)(__p0);
  30041. return __ret;
  30042. }
  30043. #else
  30044. __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
  30045. int16x8_t __ret;
  30046. __ret = (int16x8_t)(__p0);
  30047. return __ret;
  30048. }
  30049. #endif
  30050. #ifdef __LITTLE_ENDIAN__
  30051. __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
  30052. int16x8_t __ret;
  30053. __ret = (int16x8_t)(__p0);
  30054. return __ret;
  30055. }
  30056. #else
  30057. __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
  30058. int16x8_t __ret;
  30059. __ret = (int16x8_t)(__p0);
  30060. return __ret;
  30061. }
  30062. #endif
  30063. #ifdef __LITTLE_ENDIAN__
  30064. __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
  30065. uint8x8_t __ret;
  30066. __ret = (uint8x8_t)(__p0);
  30067. return __ret;
  30068. }
  30069. #else
  30070. __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
  30071. uint8x8_t __ret;
  30072. __ret = (uint8x8_t)(__p0);
  30073. return __ret;
  30074. }
  30075. #endif
  30076. #ifdef __LITTLE_ENDIAN__
  30077. __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
  30078. uint8x8_t __ret;
  30079. __ret = (uint8x8_t)(__p0);
  30080. return __ret;
  30081. }
  30082. #else
  30083. __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
  30084. uint8x8_t __ret;
  30085. __ret = (uint8x8_t)(__p0);
  30086. return __ret;
  30087. }
  30088. #endif
  30089. #ifdef __LITTLE_ENDIAN__
  30090. __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
  30091. uint8x8_t __ret;
  30092. __ret = (uint8x8_t)(__p0);
  30093. return __ret;
  30094. }
  30095. #else
  30096. __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
  30097. uint8x8_t __ret;
  30098. __ret = (uint8x8_t)(__p0);
  30099. return __ret;
  30100. }
  30101. #endif
  30102. #ifdef __LITTLE_ENDIAN__
  30103. __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
  30104. uint8x8_t __ret;
  30105. __ret = (uint8x8_t)(__p0);
  30106. return __ret;
  30107. }
  30108. #else
  30109. __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
  30110. uint8x8_t __ret;
  30111. __ret = (uint8x8_t)(__p0);
  30112. return __ret;
  30113. }
  30114. #endif
  30115. #ifdef __LITTLE_ENDIAN__
  30116. __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
  30117. uint8x8_t __ret;
  30118. __ret = (uint8x8_t)(__p0);
  30119. return __ret;
  30120. }
  30121. #else
  30122. __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
  30123. uint8x8_t __ret;
  30124. __ret = (uint8x8_t)(__p0);
  30125. return __ret;
  30126. }
  30127. #endif
  30128. #ifdef __LITTLE_ENDIAN__
  30129. __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
  30130. uint8x8_t __ret;
  30131. __ret = (uint8x8_t)(__p0);
  30132. return __ret;
  30133. }
  30134. #else
  30135. __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
  30136. uint8x8_t __ret;
  30137. __ret = (uint8x8_t)(__p0);
  30138. return __ret;
  30139. }
  30140. #endif
  30141. #ifdef __LITTLE_ENDIAN__
  30142. __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
  30143. uint8x8_t __ret;
  30144. __ret = (uint8x8_t)(__p0);
  30145. return __ret;
  30146. }
  30147. #else
  30148. __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
  30149. uint8x8_t __ret;
  30150. __ret = (uint8x8_t)(__p0);
  30151. return __ret;
  30152. }
  30153. #endif
  30154. #ifdef __LITTLE_ENDIAN__
  30155. __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
  30156. uint8x8_t __ret;
  30157. __ret = (uint8x8_t)(__p0);
  30158. return __ret;
  30159. }
  30160. #else
  30161. __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
  30162. uint8x8_t __ret;
  30163. __ret = (uint8x8_t)(__p0);
  30164. return __ret;
  30165. }
  30166. #endif
  30167. #ifdef __LITTLE_ENDIAN__
  30168. __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
  30169. uint8x8_t __ret;
  30170. __ret = (uint8x8_t)(__p0);
  30171. return __ret;
  30172. }
  30173. #else
  30174. __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
  30175. uint8x8_t __ret;
  30176. __ret = (uint8x8_t)(__p0);
  30177. return __ret;
  30178. }
  30179. #endif
  30180. #ifdef __LITTLE_ENDIAN__
  30181. __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
  30182. uint8x8_t __ret;
  30183. __ret = (uint8x8_t)(__p0);
  30184. return __ret;
  30185. }
  30186. #else
  30187. __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
  30188. uint8x8_t __ret;
  30189. __ret = (uint8x8_t)(__p0);
  30190. return __ret;
  30191. }
  30192. #endif
  30193. #ifdef __LITTLE_ENDIAN__
  30194. __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
  30195. uint8x8_t __ret;
  30196. __ret = (uint8x8_t)(__p0);
  30197. return __ret;
  30198. }
  30199. #else
  30200. __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
  30201. uint8x8_t __ret;
  30202. __ret = (uint8x8_t)(__p0);
  30203. return __ret;
  30204. }
  30205. #endif
  30206. #ifdef __LITTLE_ENDIAN__
  30207. __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
  30208. uint32x2_t __ret;
  30209. __ret = (uint32x2_t)(__p0);
  30210. return __ret;
  30211. }
  30212. #else
  30213. __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
  30214. uint32x2_t __ret;
  30215. __ret = (uint32x2_t)(__p0);
  30216. return __ret;
  30217. }
  30218. #endif
  30219. #ifdef __LITTLE_ENDIAN__
  30220. __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
  30221. uint32x2_t __ret;
  30222. __ret = (uint32x2_t)(__p0);
  30223. return __ret;
  30224. }
  30225. #else
  30226. __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
  30227. uint32x2_t __ret;
  30228. __ret = (uint32x2_t)(__p0);
  30229. return __ret;
  30230. }
  30231. #endif
  30232. #ifdef __LITTLE_ENDIAN__
  30233. __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
  30234. uint32x2_t __ret;
  30235. __ret = (uint32x2_t)(__p0);
  30236. return __ret;
  30237. }
  30238. #else
  30239. __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
  30240. uint32x2_t __ret;
  30241. __ret = (uint32x2_t)(__p0);
  30242. return __ret;
  30243. }
  30244. #endif
  30245. #ifdef __LITTLE_ENDIAN__
  30246. __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
  30247. uint32x2_t __ret;
  30248. __ret = (uint32x2_t)(__p0);
  30249. return __ret;
  30250. }
  30251. #else
  30252. __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
  30253. uint32x2_t __ret;
  30254. __ret = (uint32x2_t)(__p0);
  30255. return __ret;
  30256. }
  30257. #endif
  30258. #ifdef __LITTLE_ENDIAN__
  30259. __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
  30260. uint32x2_t __ret;
  30261. __ret = (uint32x2_t)(__p0);
  30262. return __ret;
  30263. }
  30264. #else
  30265. __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
  30266. uint32x2_t __ret;
  30267. __ret = (uint32x2_t)(__p0);
  30268. return __ret;
  30269. }
  30270. #endif
  30271. #ifdef __LITTLE_ENDIAN__
  30272. __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
  30273. uint32x2_t __ret;
  30274. __ret = (uint32x2_t)(__p0);
  30275. return __ret;
  30276. }
  30277. #else
  30278. __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
  30279. uint32x2_t __ret;
  30280. __ret = (uint32x2_t)(__p0);
  30281. return __ret;
  30282. }
  30283. #endif
  30284. #ifdef __LITTLE_ENDIAN__
  30285. __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
  30286. uint32x2_t __ret;
  30287. __ret = (uint32x2_t)(__p0);
  30288. return __ret;
  30289. }
  30290. #else
  30291. __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
  30292. uint32x2_t __ret;
  30293. __ret = (uint32x2_t)(__p0);
  30294. return __ret;
  30295. }
  30296. #endif
  30297. #ifdef __LITTLE_ENDIAN__
  30298. __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
  30299. uint32x2_t __ret;
  30300. __ret = (uint32x2_t)(__p0);
  30301. return __ret;
  30302. }
  30303. #else
  30304. __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
  30305. uint32x2_t __ret;
  30306. __ret = (uint32x2_t)(__p0);
  30307. return __ret;
  30308. }
  30309. #endif
  30310. #ifdef __LITTLE_ENDIAN__
  30311. __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
  30312. uint32x2_t __ret;
  30313. __ret = (uint32x2_t)(__p0);
  30314. return __ret;
  30315. }
  30316. #else
  30317. __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
  30318. uint32x2_t __ret;
  30319. __ret = (uint32x2_t)(__p0);
  30320. return __ret;
  30321. }
  30322. #endif
  30323. #ifdef __LITTLE_ENDIAN__
  30324. __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
  30325. uint32x2_t __ret;
  30326. __ret = (uint32x2_t)(__p0);
  30327. return __ret;
  30328. }
  30329. #else
  30330. __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
  30331. uint32x2_t __ret;
  30332. __ret = (uint32x2_t)(__p0);
  30333. return __ret;
  30334. }
  30335. #endif
  30336. #ifdef __LITTLE_ENDIAN__
  30337. __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
  30338. uint32x2_t __ret;
  30339. __ret = (uint32x2_t)(__p0);
  30340. return __ret;
  30341. }
  30342. #else
  30343. __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
  30344. uint32x2_t __ret;
  30345. __ret = (uint32x2_t)(__p0);
  30346. return __ret;
  30347. }
  30348. #endif
  30349. #ifdef __LITTLE_ENDIAN__
  30350. __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
  30351. uint64x1_t __ret;
  30352. __ret = (uint64x1_t)(__p0);
  30353. return __ret;
  30354. }
  30355. #else
  30356. __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
  30357. uint64x1_t __ret;
  30358. __ret = (uint64x1_t)(__p0);
  30359. return __ret;
  30360. }
  30361. #endif
  30362. #ifdef __LITTLE_ENDIAN__
  30363. __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
  30364. uint64x1_t __ret;
  30365. __ret = (uint64x1_t)(__p0);
  30366. return __ret;
  30367. }
  30368. #else
  30369. __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
  30370. uint64x1_t __ret;
  30371. __ret = (uint64x1_t)(__p0);
  30372. return __ret;
  30373. }
  30374. #endif
  30375. #ifdef __LITTLE_ENDIAN__
  30376. __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
  30377. uint64x1_t __ret;
  30378. __ret = (uint64x1_t)(__p0);
  30379. return __ret;
  30380. }
  30381. #else
  30382. __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
  30383. uint64x1_t __ret;
  30384. __ret = (uint64x1_t)(__p0);
  30385. return __ret;
  30386. }
  30387. #endif
  30388. #ifdef __LITTLE_ENDIAN__
  30389. __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
  30390. uint64x1_t __ret;
  30391. __ret = (uint64x1_t)(__p0);
  30392. return __ret;
  30393. }
  30394. #else
  30395. __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
  30396. uint64x1_t __ret;
  30397. __ret = (uint64x1_t)(__p0);
  30398. return __ret;
  30399. }
  30400. #endif
  30401. #ifdef __LITTLE_ENDIAN__
  30402. __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
  30403. uint64x1_t __ret;
  30404. __ret = (uint64x1_t)(__p0);
  30405. return __ret;
  30406. }
  30407. #else
  30408. __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
  30409. uint64x1_t __ret;
  30410. __ret = (uint64x1_t)(__p0);
  30411. return __ret;
  30412. }
  30413. #endif
  30414. #ifdef __LITTLE_ENDIAN__
  30415. __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
  30416. uint64x1_t __ret;
  30417. __ret = (uint64x1_t)(__p0);
  30418. return __ret;
  30419. }
  30420. #else
  30421. __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
  30422. uint64x1_t __ret;
  30423. __ret = (uint64x1_t)(__p0);
  30424. return __ret;
  30425. }
  30426. #endif
  30427. #ifdef __LITTLE_ENDIAN__
  30428. __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
  30429. uint64x1_t __ret;
  30430. __ret = (uint64x1_t)(__p0);
  30431. return __ret;
  30432. }
  30433. #else
  30434. __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
  30435. uint64x1_t __ret;
  30436. __ret = (uint64x1_t)(__p0);
  30437. return __ret;
  30438. }
  30439. #endif
  30440. #ifdef __LITTLE_ENDIAN__
  30441. __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
  30442. uint64x1_t __ret;
  30443. __ret = (uint64x1_t)(__p0);
  30444. return __ret;
  30445. }
  30446. #else
  30447. __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
  30448. uint64x1_t __ret;
  30449. __ret = (uint64x1_t)(__p0);
  30450. return __ret;
  30451. }
  30452. #endif
  30453. #ifdef __LITTLE_ENDIAN__
  30454. __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
  30455. uint64x1_t __ret;
  30456. __ret = (uint64x1_t)(__p0);
  30457. return __ret;
  30458. }
  30459. #else
  30460. __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
  30461. uint64x1_t __ret;
  30462. __ret = (uint64x1_t)(__p0);
  30463. return __ret;
  30464. }
  30465. #endif
  30466. #ifdef __LITTLE_ENDIAN__
  30467. __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
  30468. uint64x1_t __ret;
  30469. __ret = (uint64x1_t)(__p0);
  30470. return __ret;
  30471. }
  30472. #else
  30473. __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
  30474. uint64x1_t __ret;
  30475. __ret = (uint64x1_t)(__p0);
  30476. return __ret;
  30477. }
  30478. #endif
  30479. #ifdef __LITTLE_ENDIAN__
  30480. __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
  30481. uint64x1_t __ret;
  30482. __ret = (uint64x1_t)(__p0);
  30483. return __ret;
  30484. }
  30485. #else
  30486. __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
  30487. uint64x1_t __ret;
  30488. __ret = (uint64x1_t)(__p0);
  30489. return __ret;
  30490. }
  30491. #endif
  30492. #ifdef __LITTLE_ENDIAN__
  30493. __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
  30494. uint16x4_t __ret;
  30495. __ret = (uint16x4_t)(__p0);
  30496. return __ret;
  30497. }
  30498. #else
  30499. __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
  30500. uint16x4_t __ret;
  30501. __ret = (uint16x4_t)(__p0);
  30502. return __ret;
  30503. }
  30504. #endif
  30505. #ifdef __LITTLE_ENDIAN__
  30506. __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
  30507. uint16x4_t __ret;
  30508. __ret = (uint16x4_t)(__p0);
  30509. return __ret;
  30510. }
  30511. #else
  30512. __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
  30513. uint16x4_t __ret;
  30514. __ret = (uint16x4_t)(__p0);
  30515. return __ret;
  30516. }
  30517. #endif
  30518. #ifdef __LITTLE_ENDIAN__
  30519. __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
  30520. uint16x4_t __ret;
  30521. __ret = (uint16x4_t)(__p0);
  30522. return __ret;
  30523. }
  30524. #else
  30525. __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
  30526. uint16x4_t __ret;
  30527. __ret = (uint16x4_t)(__p0);
  30528. return __ret;
  30529. }
  30530. #endif
  30531. #ifdef __LITTLE_ENDIAN__
  30532. __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
  30533. uint16x4_t __ret;
  30534. __ret = (uint16x4_t)(__p0);
  30535. return __ret;
  30536. }
  30537. #else
  30538. __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
  30539. uint16x4_t __ret;
  30540. __ret = (uint16x4_t)(__p0);
  30541. return __ret;
  30542. }
  30543. #endif
  30544. #ifdef __LITTLE_ENDIAN__
  30545. __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
  30546. uint16x4_t __ret;
  30547. __ret = (uint16x4_t)(__p0);
  30548. return __ret;
  30549. }
  30550. #else
  30551. __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
  30552. uint16x4_t __ret;
  30553. __ret = (uint16x4_t)(__p0);
  30554. return __ret;
  30555. }
  30556. #endif
  30557. #ifdef __LITTLE_ENDIAN__
  30558. __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
  30559. uint16x4_t __ret;
  30560. __ret = (uint16x4_t)(__p0);
  30561. return __ret;
  30562. }
  30563. #else
  30564. __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
  30565. uint16x4_t __ret;
  30566. __ret = (uint16x4_t)(__p0);
  30567. return __ret;
  30568. }
  30569. #endif
  30570. #ifdef __LITTLE_ENDIAN__
  30571. __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
  30572. uint16x4_t __ret;
  30573. __ret = (uint16x4_t)(__p0);
  30574. return __ret;
  30575. }
  30576. #else
  30577. __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
  30578. uint16x4_t __ret;
  30579. __ret = (uint16x4_t)(__p0);
  30580. return __ret;
  30581. }
  30582. #endif
  30583. #ifdef __LITTLE_ENDIAN__
  30584. __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
  30585. uint16x4_t __ret;
  30586. __ret = (uint16x4_t)(__p0);
  30587. return __ret;
  30588. }
  30589. #else
  30590. __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
  30591. uint16x4_t __ret;
  30592. __ret = (uint16x4_t)(__p0);
  30593. return __ret;
  30594. }
  30595. #endif
  30596. #ifdef __LITTLE_ENDIAN__
  30597. __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
  30598. uint16x4_t __ret;
  30599. __ret = (uint16x4_t)(__p0);
  30600. return __ret;
  30601. }
  30602. #else
  30603. __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
  30604. uint16x4_t __ret;
  30605. __ret = (uint16x4_t)(__p0);
  30606. return __ret;
  30607. }
  30608. #endif
  30609. #ifdef __LITTLE_ENDIAN__
  30610. __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
  30611. uint16x4_t __ret;
  30612. __ret = (uint16x4_t)(__p0);
  30613. return __ret;
  30614. }
  30615. #else
  30616. __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
  30617. uint16x4_t __ret;
  30618. __ret = (uint16x4_t)(__p0);
  30619. return __ret;
  30620. }
  30621. #endif
  30622. #ifdef __LITTLE_ENDIAN__
  30623. __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
  30624. uint16x4_t __ret;
  30625. __ret = (uint16x4_t)(__p0);
  30626. return __ret;
  30627. }
  30628. #else
  30629. __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
  30630. uint16x4_t __ret;
  30631. __ret = (uint16x4_t)(__p0);
  30632. return __ret;
  30633. }
  30634. #endif
  30635. #ifdef __LITTLE_ENDIAN__
  30636. __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
  30637. int8x8_t __ret;
  30638. __ret = (int8x8_t)(__p0);
  30639. return __ret;
  30640. }
  30641. #else
  30642. __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
  30643. int8x8_t __ret;
  30644. __ret = (int8x8_t)(__p0);
  30645. return __ret;
  30646. }
  30647. #endif
  30648. #ifdef __LITTLE_ENDIAN__
  30649. __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
  30650. int8x8_t __ret;
  30651. __ret = (int8x8_t)(__p0);
  30652. return __ret;
  30653. }
  30654. #else
  30655. __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
  30656. int8x8_t __ret;
  30657. __ret = (int8x8_t)(__p0);
  30658. return __ret;
  30659. }
  30660. #endif
  30661. #ifdef __LITTLE_ENDIAN__
  30662. __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
  30663. int8x8_t __ret;
  30664. __ret = (int8x8_t)(__p0);
  30665. return __ret;
  30666. }
  30667. #else
  30668. __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
  30669. int8x8_t __ret;
  30670. __ret = (int8x8_t)(__p0);
  30671. return __ret;
  30672. }
  30673. #endif
  30674. #ifdef __LITTLE_ENDIAN__
  30675. __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
  30676. int8x8_t __ret;
  30677. __ret = (int8x8_t)(__p0);
  30678. return __ret;
  30679. }
  30680. #else
  30681. __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
  30682. int8x8_t __ret;
  30683. __ret = (int8x8_t)(__p0);
  30684. return __ret;
  30685. }
  30686. #endif
  30687. #ifdef __LITTLE_ENDIAN__
  30688. __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
  30689. int8x8_t __ret;
  30690. __ret = (int8x8_t)(__p0);
  30691. return __ret;
  30692. }
  30693. #else
  30694. __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
  30695. int8x8_t __ret;
  30696. __ret = (int8x8_t)(__p0);
  30697. return __ret;
  30698. }
  30699. #endif
  30700. #ifdef __LITTLE_ENDIAN__
  30701. __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
  30702. int8x8_t __ret;
  30703. __ret = (int8x8_t)(__p0);
  30704. return __ret;
  30705. }
  30706. #else
  30707. __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
  30708. int8x8_t __ret;
  30709. __ret = (int8x8_t)(__p0);
  30710. return __ret;
  30711. }
  30712. #endif
  30713. #ifdef __LITTLE_ENDIAN__
  30714. __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
  30715. int8x8_t __ret;
  30716. __ret = (int8x8_t)(__p0);
  30717. return __ret;
  30718. }
  30719. #else
  30720. __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
  30721. int8x8_t __ret;
  30722. __ret = (int8x8_t)(__p0);
  30723. return __ret;
  30724. }
  30725. #endif
  30726. #ifdef __LITTLE_ENDIAN__
  30727. __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
  30728. int8x8_t __ret;
  30729. __ret = (int8x8_t)(__p0);
  30730. return __ret;
  30731. }
  30732. #else
  30733. __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
  30734. int8x8_t __ret;
  30735. __ret = (int8x8_t)(__p0);
  30736. return __ret;
  30737. }
  30738. #endif
  30739. #ifdef __LITTLE_ENDIAN__
  30740. __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
  30741. int8x8_t __ret;
  30742. __ret = (int8x8_t)(__p0);
  30743. return __ret;
  30744. }
  30745. #else
  30746. __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
  30747. int8x8_t __ret;
  30748. __ret = (int8x8_t)(__p0);
  30749. return __ret;
  30750. }
  30751. #endif
  30752. #ifdef __LITTLE_ENDIAN__
  30753. __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
  30754. int8x8_t __ret;
  30755. __ret = (int8x8_t)(__p0);
  30756. return __ret;
  30757. }
  30758. #else
  30759. __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
  30760. int8x8_t __ret;
  30761. __ret = (int8x8_t)(__p0);
  30762. return __ret;
  30763. }
  30764. #endif
  30765. #ifdef __LITTLE_ENDIAN__
  30766. __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
  30767. int8x8_t __ret;
  30768. __ret = (int8x8_t)(__p0);
  30769. return __ret;
  30770. }
  30771. #else
  30772. __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
  30773. int8x8_t __ret;
  30774. __ret = (int8x8_t)(__p0);
  30775. return __ret;
  30776. }
  30777. #endif
  30778. #ifdef __LITTLE_ENDIAN__
  30779. __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
  30780. float32x2_t __ret;
  30781. __ret = (float32x2_t)(__p0);
  30782. return __ret;
  30783. }
  30784. #else
  30785. __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
  30786. float32x2_t __ret;
  30787. __ret = (float32x2_t)(__p0);
  30788. return __ret;
  30789. }
  30790. #endif
  30791. #ifdef __LITTLE_ENDIAN__
  30792. __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
  30793. float32x2_t __ret;
  30794. __ret = (float32x2_t)(__p0);
  30795. return __ret;
  30796. }
  30797. #else
  30798. __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
  30799. float32x2_t __ret;
  30800. __ret = (float32x2_t)(__p0);
  30801. return __ret;
  30802. }
  30803. #endif
  30804. #ifdef __LITTLE_ENDIAN__
  30805. __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
  30806. float32x2_t __ret;
  30807. __ret = (float32x2_t)(__p0);
  30808. return __ret;
  30809. }
  30810. #else
  30811. __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
  30812. float32x2_t __ret;
  30813. __ret = (float32x2_t)(__p0);
  30814. return __ret;
  30815. }
  30816. #endif
  30817. #ifdef __LITTLE_ENDIAN__
  30818. __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
  30819. float32x2_t __ret;
  30820. __ret = (float32x2_t)(__p0);
  30821. return __ret;
  30822. }
  30823. #else
  30824. __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
  30825. float32x2_t __ret;
  30826. __ret = (float32x2_t)(__p0);
  30827. return __ret;
  30828. }
  30829. #endif
  30830. #ifdef __LITTLE_ENDIAN__
  30831. __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
  30832. float32x2_t __ret;
  30833. __ret = (float32x2_t)(__p0);
  30834. return __ret;
  30835. }
  30836. #else
  30837. __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
  30838. float32x2_t __ret;
  30839. __ret = (float32x2_t)(__p0);
  30840. return __ret;
  30841. }
  30842. #endif
  30843. #ifdef __LITTLE_ENDIAN__
  30844. __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
  30845. float32x2_t __ret;
  30846. __ret = (float32x2_t)(__p0);
  30847. return __ret;
  30848. }
  30849. #else
  30850. __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
  30851. float32x2_t __ret;
  30852. __ret = (float32x2_t)(__p0);
  30853. return __ret;
  30854. }
  30855. #endif
  30856. #ifdef __LITTLE_ENDIAN__
  30857. __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
  30858. float32x2_t __ret;
  30859. __ret = (float32x2_t)(__p0);
  30860. return __ret;
  30861. }
  30862. #else
  30863. __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
  30864. float32x2_t __ret;
  30865. __ret = (float32x2_t)(__p0);
  30866. return __ret;
  30867. }
  30868. #endif
  30869. #ifdef __LITTLE_ENDIAN__
  30870. __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
  30871. float32x2_t __ret;
  30872. __ret = (float32x2_t)(__p0);
  30873. return __ret;
  30874. }
  30875. #else
  30876. __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
  30877. float32x2_t __ret;
  30878. __ret = (float32x2_t)(__p0);
  30879. return __ret;
  30880. }
  30881. #endif
  30882. #ifdef __LITTLE_ENDIAN__
  30883. __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
  30884. float32x2_t __ret;
  30885. __ret = (float32x2_t)(__p0);
  30886. return __ret;
  30887. }
  30888. #else
  30889. __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
  30890. float32x2_t __ret;
  30891. __ret = (float32x2_t)(__p0);
  30892. return __ret;
  30893. }
  30894. #endif
  30895. #ifdef __LITTLE_ENDIAN__
  30896. __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
  30897. float32x2_t __ret;
  30898. __ret = (float32x2_t)(__p0);
  30899. return __ret;
  30900. }
  30901. #else
  30902. __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
  30903. float32x2_t __ret;
  30904. __ret = (float32x2_t)(__p0);
  30905. return __ret;
  30906. }
  30907. #endif
  30908. #ifdef __LITTLE_ENDIAN__
  30909. __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
  30910. float32x2_t __ret;
  30911. __ret = (float32x2_t)(__p0);
  30912. return __ret;
  30913. }
  30914. #else
  30915. __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
  30916. float32x2_t __ret;
  30917. __ret = (float32x2_t)(__p0);
  30918. return __ret;
  30919. }
  30920. #endif
  30921. #ifdef __LITTLE_ENDIAN__
  30922. __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
  30923. float16x4_t __ret;
  30924. __ret = (float16x4_t)(__p0);
  30925. return __ret;
  30926. }
  30927. #else
  30928. __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
  30929. float16x4_t __ret;
  30930. __ret = (float16x4_t)(__p0);
  30931. return __ret;
  30932. }
  30933. #endif
  30934. #ifdef __LITTLE_ENDIAN__
  30935. __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
  30936. float16x4_t __ret;
  30937. __ret = (float16x4_t)(__p0);
  30938. return __ret;
  30939. }
  30940. #else
  30941. __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
  30942. float16x4_t __ret;
  30943. __ret = (float16x4_t)(__p0);
  30944. return __ret;
  30945. }
  30946. #endif
  30947. #ifdef __LITTLE_ENDIAN__
  30948. __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
  30949. float16x4_t __ret;
  30950. __ret = (float16x4_t)(__p0);
  30951. return __ret;
  30952. }
  30953. #else
  30954. __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
  30955. float16x4_t __ret;
  30956. __ret = (float16x4_t)(__p0);
  30957. return __ret;
  30958. }
  30959. #endif
  30960. #ifdef __LITTLE_ENDIAN__
  30961. __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
  30962. float16x4_t __ret;
  30963. __ret = (float16x4_t)(__p0);
  30964. return __ret;
  30965. }
  30966. #else
  30967. __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
  30968. float16x4_t __ret;
  30969. __ret = (float16x4_t)(__p0);
  30970. return __ret;
  30971. }
  30972. #endif
  30973. #ifdef __LITTLE_ENDIAN__
  30974. __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
  30975. float16x4_t __ret;
  30976. __ret = (float16x4_t)(__p0);
  30977. return __ret;
  30978. }
  30979. #else
  30980. __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
  30981. float16x4_t __ret;
  30982. __ret = (float16x4_t)(__p0);
  30983. return __ret;
  30984. }
  30985. #endif
  30986. #ifdef __LITTLE_ENDIAN__
  30987. __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
  30988. float16x4_t __ret;
  30989. __ret = (float16x4_t)(__p0);
  30990. return __ret;
  30991. }
  30992. #else
  30993. __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
  30994. float16x4_t __ret;
  30995. __ret = (float16x4_t)(__p0);
  30996. return __ret;
  30997. }
  30998. #endif
  30999. #ifdef __LITTLE_ENDIAN__
  31000. __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
  31001. float16x4_t __ret;
  31002. __ret = (float16x4_t)(__p0);
  31003. return __ret;
  31004. }
  31005. #else
  31006. __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
  31007. float16x4_t __ret;
  31008. __ret = (float16x4_t)(__p0);
  31009. return __ret;
  31010. }
  31011. #endif
  31012. #ifdef __LITTLE_ENDIAN__
  31013. __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
  31014. float16x4_t __ret;
  31015. __ret = (float16x4_t)(__p0);
  31016. return __ret;
  31017. }
  31018. #else
  31019. __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
  31020. float16x4_t __ret;
  31021. __ret = (float16x4_t)(__p0);
  31022. return __ret;
  31023. }
  31024. #endif
  31025. #ifdef __LITTLE_ENDIAN__
  31026. __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
  31027. float16x4_t __ret;
  31028. __ret = (float16x4_t)(__p0);
  31029. return __ret;
  31030. }
  31031. #else
  31032. __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
  31033. float16x4_t __ret;
  31034. __ret = (float16x4_t)(__p0);
  31035. return __ret;
  31036. }
  31037. #endif
  31038. #ifdef __LITTLE_ENDIAN__
  31039. __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
  31040. float16x4_t __ret;
  31041. __ret = (float16x4_t)(__p0);
  31042. return __ret;
  31043. }
  31044. #else
  31045. __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
  31046. float16x4_t __ret;
  31047. __ret = (float16x4_t)(__p0);
  31048. return __ret;
  31049. }
  31050. #endif
  31051. #ifdef __LITTLE_ENDIAN__
  31052. __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
  31053. float16x4_t __ret;
  31054. __ret = (float16x4_t)(__p0);
  31055. return __ret;
  31056. }
  31057. #else
  31058. __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
  31059. float16x4_t __ret;
  31060. __ret = (float16x4_t)(__p0);
  31061. return __ret;
  31062. }
  31063. #endif
  31064. #ifdef __LITTLE_ENDIAN__
  31065. __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
  31066. int32x2_t __ret;
  31067. __ret = (int32x2_t)(__p0);
  31068. return __ret;
  31069. }
  31070. #else
  31071. __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
  31072. int32x2_t __ret;
  31073. __ret = (int32x2_t)(__p0);
  31074. return __ret;
  31075. }
  31076. #endif
  31077. #ifdef __LITTLE_ENDIAN__
  31078. __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
  31079. int32x2_t __ret;
  31080. __ret = (int32x2_t)(__p0);
  31081. return __ret;
  31082. }
  31083. #else
  31084. __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
  31085. int32x2_t __ret;
  31086. __ret = (int32x2_t)(__p0);
  31087. return __ret;
  31088. }
  31089. #endif
  31090. #ifdef __LITTLE_ENDIAN__
  31091. __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
  31092. int32x2_t __ret;
  31093. __ret = (int32x2_t)(__p0);
  31094. return __ret;
  31095. }
  31096. #else
  31097. __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
  31098. int32x2_t __ret;
  31099. __ret = (int32x2_t)(__p0);
  31100. return __ret;
  31101. }
  31102. #endif
  31103. #ifdef __LITTLE_ENDIAN__
  31104. __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
  31105. int32x2_t __ret;
  31106. __ret = (int32x2_t)(__p0);
  31107. return __ret;
  31108. }
  31109. #else
  31110. __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
  31111. int32x2_t __ret;
  31112. __ret = (int32x2_t)(__p0);
  31113. return __ret;
  31114. }
  31115. #endif
  31116. #ifdef __LITTLE_ENDIAN__
  31117. __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
  31118. int32x2_t __ret;
  31119. __ret = (int32x2_t)(__p0);
  31120. return __ret;
  31121. }
  31122. #else
  31123. __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
  31124. int32x2_t __ret;
  31125. __ret = (int32x2_t)(__p0);
  31126. return __ret;
  31127. }
  31128. #endif
  31129. #ifdef __LITTLE_ENDIAN__
  31130. __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
  31131. int32x2_t __ret;
  31132. __ret = (int32x2_t)(__p0);
  31133. return __ret;
  31134. }
  31135. #else
  31136. __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
  31137. int32x2_t __ret;
  31138. __ret = (int32x2_t)(__p0);
  31139. return __ret;
  31140. }
  31141. #endif
  31142. #ifdef __LITTLE_ENDIAN__
  31143. __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
  31144. int32x2_t __ret;
  31145. __ret = (int32x2_t)(__p0);
  31146. return __ret;
  31147. }
  31148. #else
  31149. __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
  31150. int32x2_t __ret;
  31151. __ret = (int32x2_t)(__p0);
  31152. return __ret;
  31153. }
  31154. #endif
  31155. #ifdef __LITTLE_ENDIAN__
  31156. __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
  31157. int32x2_t __ret;
  31158. __ret = (int32x2_t)(__p0);
  31159. return __ret;
  31160. }
  31161. #else
  31162. __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
  31163. int32x2_t __ret;
  31164. __ret = (int32x2_t)(__p0);
  31165. return __ret;
  31166. }
  31167. #endif
  31168. #ifdef __LITTLE_ENDIAN__
  31169. __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
  31170. int32x2_t __ret;
  31171. __ret = (int32x2_t)(__p0);
  31172. return __ret;
  31173. }
  31174. #else
  31175. __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
  31176. int32x2_t __ret;
  31177. __ret = (int32x2_t)(__p0);
  31178. return __ret;
  31179. }
  31180. #endif
  31181. #ifdef __LITTLE_ENDIAN__
  31182. __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
  31183. int32x2_t __ret;
  31184. __ret = (int32x2_t)(__p0);
  31185. return __ret;
  31186. }
  31187. #else
  31188. __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
  31189. int32x2_t __ret;
  31190. __ret = (int32x2_t)(__p0);
  31191. return __ret;
  31192. }
  31193. #endif
  31194. #ifdef __LITTLE_ENDIAN__
  31195. __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
  31196. int32x2_t __ret;
  31197. __ret = (int32x2_t)(__p0);
  31198. return __ret;
  31199. }
  31200. #else
  31201. __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
  31202. int32x2_t __ret;
  31203. __ret = (int32x2_t)(__p0);
  31204. return __ret;
  31205. }
  31206. #endif
  31207. #ifdef __LITTLE_ENDIAN__
  31208. __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
  31209. int64x1_t __ret;
  31210. __ret = (int64x1_t)(__p0);
  31211. return __ret;
  31212. }
  31213. #else
  31214. __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
  31215. int64x1_t __ret;
  31216. __ret = (int64x1_t)(__p0);
  31217. return __ret;
  31218. }
  31219. #endif
  31220. #ifdef __LITTLE_ENDIAN__
  31221. __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
  31222. int64x1_t __ret;
  31223. __ret = (int64x1_t)(__p0);
  31224. return __ret;
  31225. }
  31226. #else
  31227. __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
  31228. int64x1_t __ret;
  31229. __ret = (int64x1_t)(__p0);
  31230. return __ret;
  31231. }
  31232. #endif
  31233. #ifdef __LITTLE_ENDIAN__
  31234. __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
  31235. int64x1_t __ret;
  31236. __ret = (int64x1_t)(__p0);
  31237. return __ret;
  31238. }
  31239. #else
  31240. __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
  31241. int64x1_t __ret;
  31242. __ret = (int64x1_t)(__p0);
  31243. return __ret;
  31244. }
  31245. #endif
  31246. #ifdef __LITTLE_ENDIAN__
  31247. __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
  31248. int64x1_t __ret;
  31249. __ret = (int64x1_t)(__p0);
  31250. return __ret;
  31251. }
  31252. #else
  31253. __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
  31254. int64x1_t __ret;
  31255. __ret = (int64x1_t)(__p0);
  31256. return __ret;
  31257. }
  31258. #endif
  31259. #ifdef __LITTLE_ENDIAN__
  31260. __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
  31261. int64x1_t __ret;
  31262. __ret = (int64x1_t)(__p0);
  31263. return __ret;
  31264. }
  31265. #else
  31266. __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
  31267. int64x1_t __ret;
  31268. __ret = (int64x1_t)(__p0);
  31269. return __ret;
  31270. }
  31271. #endif
  31272. #ifdef __LITTLE_ENDIAN__
  31273. __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
  31274. int64x1_t __ret;
  31275. __ret = (int64x1_t)(__p0);
  31276. return __ret;
  31277. }
  31278. #else
  31279. __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
  31280. int64x1_t __ret;
  31281. __ret = (int64x1_t)(__p0);
  31282. return __ret;
  31283. }
  31284. #endif
  31285. #ifdef __LITTLE_ENDIAN__
  31286. __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
  31287. int64x1_t __ret;
  31288. __ret = (int64x1_t)(__p0);
  31289. return __ret;
  31290. }
  31291. #else
  31292. __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
  31293. int64x1_t __ret;
  31294. __ret = (int64x1_t)(__p0);
  31295. return __ret;
  31296. }
  31297. #endif
  31298. #ifdef __LITTLE_ENDIAN__
  31299. __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
  31300. int64x1_t __ret;
  31301. __ret = (int64x1_t)(__p0);
  31302. return __ret;
  31303. }
  31304. #else
  31305. __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
  31306. int64x1_t __ret;
  31307. __ret = (int64x1_t)(__p0);
  31308. return __ret;
  31309. }
  31310. #endif
  31311. #ifdef __LITTLE_ENDIAN__
  31312. __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
  31313. int64x1_t __ret;
  31314. __ret = (int64x1_t)(__p0);
  31315. return __ret;
  31316. }
  31317. #else
  31318. __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
  31319. int64x1_t __ret;
  31320. __ret = (int64x1_t)(__p0);
  31321. return __ret;
  31322. }
  31323. #endif
  31324. #ifdef __LITTLE_ENDIAN__
  31325. __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
  31326. int64x1_t __ret;
  31327. __ret = (int64x1_t)(__p0);
  31328. return __ret;
  31329. }
  31330. #else
  31331. __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
  31332. int64x1_t __ret;
  31333. __ret = (int64x1_t)(__p0);
  31334. return __ret;
  31335. }
  31336. #endif
  31337. #ifdef __LITTLE_ENDIAN__
  31338. __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
  31339. int64x1_t __ret;
  31340. __ret = (int64x1_t)(__p0);
  31341. return __ret;
  31342. }
  31343. #else
  31344. __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
  31345. int64x1_t __ret;
  31346. __ret = (int64x1_t)(__p0);
  31347. return __ret;
  31348. }
  31349. #endif
  31350. #ifdef __LITTLE_ENDIAN__
  31351. __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
  31352. int16x4_t __ret;
  31353. __ret = (int16x4_t)(__p0);
  31354. return __ret;
  31355. }
  31356. #else
  31357. __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
  31358. int16x4_t __ret;
  31359. __ret = (int16x4_t)(__p0);
  31360. return __ret;
  31361. }
  31362. #endif
  31363. #ifdef __LITTLE_ENDIAN__
  31364. __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
  31365. int16x4_t __ret;
  31366. __ret = (int16x4_t)(__p0);
  31367. return __ret;
  31368. }
  31369. #else
  31370. __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
  31371. int16x4_t __ret;
  31372. __ret = (int16x4_t)(__p0);
  31373. return __ret;
  31374. }
  31375. #endif
  31376. #ifdef __LITTLE_ENDIAN__
  31377. __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
  31378. int16x4_t __ret;
  31379. __ret = (int16x4_t)(__p0);
  31380. return __ret;
  31381. }
  31382. #else
  31383. __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
  31384. int16x4_t __ret;
  31385. __ret = (int16x4_t)(__p0);
  31386. return __ret;
  31387. }
  31388. #endif
  31389. #ifdef __LITTLE_ENDIAN__
  31390. __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
  31391. int16x4_t __ret;
  31392. __ret = (int16x4_t)(__p0);
  31393. return __ret;
  31394. }
  31395. #else
  31396. __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
  31397. int16x4_t __ret;
  31398. __ret = (int16x4_t)(__p0);
  31399. return __ret;
  31400. }
  31401. #endif
  31402. #ifdef __LITTLE_ENDIAN__
  31403. __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
  31404. int16x4_t __ret;
  31405. __ret = (int16x4_t)(__p0);
  31406. return __ret;
  31407. }
  31408. #else
  31409. __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
  31410. int16x4_t __ret;
  31411. __ret = (int16x4_t)(__p0);
  31412. return __ret;
  31413. }
  31414. #endif
  31415. #ifdef __LITTLE_ENDIAN__
  31416. __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
  31417. int16x4_t __ret;
  31418. __ret = (int16x4_t)(__p0);
  31419. return __ret;
  31420. }
  31421. #else
  31422. __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
  31423. int16x4_t __ret;
  31424. __ret = (int16x4_t)(__p0);
  31425. return __ret;
  31426. }
  31427. #endif
  31428. #ifdef __LITTLE_ENDIAN__
  31429. __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
  31430. int16x4_t __ret;
  31431. __ret = (int16x4_t)(__p0);
  31432. return __ret;
  31433. }
  31434. #else
  31435. __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
  31436. int16x4_t __ret;
  31437. __ret = (int16x4_t)(__p0);
  31438. return __ret;
  31439. }
  31440. #endif
  31441. #ifdef __LITTLE_ENDIAN__
  31442. __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
  31443. int16x4_t __ret;
  31444. __ret = (int16x4_t)(__p0);
  31445. return __ret;
  31446. }
  31447. #else
  31448. __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
  31449. int16x4_t __ret;
  31450. __ret = (int16x4_t)(__p0);
  31451. return __ret;
  31452. }
  31453. #endif
  31454. #ifdef __LITTLE_ENDIAN__
  31455. __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
  31456. int16x4_t __ret;
  31457. __ret = (int16x4_t)(__p0);
  31458. return __ret;
  31459. }
  31460. #else
  31461. __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
  31462. int16x4_t __ret;
  31463. __ret = (int16x4_t)(__p0);
  31464. return __ret;
  31465. }
  31466. #endif
  31467. #ifdef __LITTLE_ENDIAN__
  31468. __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
  31469. int16x4_t __ret;
  31470. __ret = (int16x4_t)(__p0);
  31471. return __ret;
  31472. }
  31473. #else
  31474. __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
  31475. int16x4_t __ret;
  31476. __ret = (int16x4_t)(__p0);
  31477. return __ret;
  31478. }
  31479. #endif
  31480. #ifdef __LITTLE_ENDIAN__
  31481. __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
  31482. int16x4_t __ret;
  31483. __ret = (int16x4_t)(__p0);
  31484. return __ret;
  31485. }
  31486. #else
  31487. __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
  31488. int16x4_t __ret;
  31489. __ret = (int16x4_t)(__p0);
  31490. return __ret;
  31491. }
  31492. #endif
  31493. #endif
  31494. #if (__ARM_FP & 2)
  31495. #ifdef __LITTLE_ENDIAN__
  31496. __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
  31497. float16x4_t __ret;
  31498. __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
  31499. return __ret;
  31500. }
  31501. #else
  31502. __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
  31503. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31504. float16x4_t __ret;
  31505. __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 8);
  31506. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31507. return __ret;
  31508. }
  31509. __ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
  31510. float16x4_t __ret;
  31511. __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
  31512. return __ret;
  31513. }
  31514. #endif
  31515. #ifdef __LITTLE_ENDIAN__
  31516. __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
  31517. float32x4_t __ret;
  31518. __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
  31519. return __ret;
  31520. }
  31521. #else
  31522. __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
  31523. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31524. float32x4_t __ret;
  31525. __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 41);
  31526. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31527. return __ret;
  31528. }
  31529. __ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
  31530. float32x4_t __ret;
  31531. __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
  31532. return __ret;
  31533. }
  31534. #endif
  31535. #endif
  31536. #if __ARM_ARCH >= 8
  31537. #ifdef __LITTLE_ENDIAN__
  31538. __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
  31539. int32x4_t __ret;
  31540. __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
  31541. return __ret;
  31542. }
  31543. #else
  31544. __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
  31545. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31546. int32x4_t __ret;
  31547. __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
  31548. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31549. return __ret;
  31550. }
  31551. #endif
  31552. #ifdef __LITTLE_ENDIAN__
  31553. __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
  31554. int32x2_t __ret;
  31555. __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
  31556. return __ret;
  31557. }
  31558. #else
  31559. __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
  31560. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31561. int32x2_t __ret;
  31562. __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
  31563. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31564. return __ret;
  31565. }
  31566. #endif
  31567. #ifdef __LITTLE_ENDIAN__
  31568. __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
  31569. uint32x4_t __ret;
  31570. __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
  31571. return __ret;
  31572. }
  31573. #else
  31574. __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
  31575. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31576. uint32x4_t __ret;
  31577. __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
  31578. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31579. return __ret;
  31580. }
  31581. #endif
  31582. #ifdef __LITTLE_ENDIAN__
  31583. __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
  31584. uint32x2_t __ret;
  31585. __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
  31586. return __ret;
  31587. }
  31588. #else
  31589. __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
  31590. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31591. uint32x2_t __ret;
  31592. __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
  31593. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31594. return __ret;
  31595. }
  31596. #endif
  31597. #ifdef __LITTLE_ENDIAN__
  31598. __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
  31599. int32x4_t __ret;
  31600. __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
  31601. return __ret;
  31602. }
  31603. #else
  31604. __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
  31605. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31606. int32x4_t __ret;
  31607. __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
  31608. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31609. return __ret;
  31610. }
  31611. #endif
  31612. #ifdef __LITTLE_ENDIAN__
  31613. __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
  31614. int32x2_t __ret;
  31615. __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
  31616. return __ret;
  31617. }
  31618. #else
  31619. __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
  31620. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31621. int32x2_t __ret;
  31622. __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
  31623. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31624. return __ret;
  31625. }
  31626. #endif
  31627. #ifdef __LITTLE_ENDIAN__
  31628. __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
  31629. uint32x4_t __ret;
  31630. __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
  31631. return __ret;
  31632. }
  31633. #else
  31634. __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
  31635. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31636. uint32x4_t __ret;
  31637. __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
  31638. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31639. return __ret;
  31640. }
  31641. #endif
  31642. #ifdef __LITTLE_ENDIAN__
  31643. __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
  31644. uint32x2_t __ret;
  31645. __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
  31646. return __ret;
  31647. }
  31648. #else
  31649. __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
  31650. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31651. uint32x2_t __ret;
  31652. __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
  31653. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31654. return __ret;
  31655. }
  31656. #endif
  31657. #ifdef __LITTLE_ENDIAN__
  31658. __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
  31659. int32x4_t __ret;
  31660. __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
  31661. return __ret;
  31662. }
  31663. #else
  31664. __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
  31665. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31666. int32x4_t __ret;
  31667. __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
  31668. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31669. return __ret;
  31670. }
  31671. #endif
  31672. #ifdef __LITTLE_ENDIAN__
  31673. __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
  31674. int32x2_t __ret;
  31675. __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
  31676. return __ret;
  31677. }
  31678. #else
  31679. __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
  31680. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31681. int32x2_t __ret;
  31682. __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
  31683. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31684. return __ret;
  31685. }
  31686. #endif
  31687. #ifdef __LITTLE_ENDIAN__
  31688. __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
  31689. uint32x4_t __ret;
  31690. __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
  31691. return __ret;
  31692. }
  31693. #else
  31694. __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
  31695. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31696. uint32x4_t __ret;
  31697. __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
  31698. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31699. return __ret;
  31700. }
  31701. #endif
  31702. #ifdef __LITTLE_ENDIAN__
  31703. __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
  31704. uint32x2_t __ret;
  31705. __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
  31706. return __ret;
  31707. }
  31708. #else
  31709. __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
  31710. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31711. uint32x2_t __ret;
  31712. __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
  31713. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31714. return __ret;
  31715. }
  31716. #endif
  31717. #ifdef __LITTLE_ENDIAN__
  31718. __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
  31719. int32x4_t __ret;
  31720. __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
  31721. return __ret;
  31722. }
  31723. #else
  31724. __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
  31725. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31726. int32x4_t __ret;
  31727. __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
  31728. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31729. return __ret;
  31730. }
  31731. #endif
  31732. #ifdef __LITTLE_ENDIAN__
  31733. __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
  31734. int32x2_t __ret;
  31735. __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
  31736. return __ret;
  31737. }
  31738. #else
  31739. __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
  31740. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31741. int32x2_t __ret;
  31742. __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
  31743. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31744. return __ret;
  31745. }
  31746. #endif
  31747. #ifdef __LITTLE_ENDIAN__
  31748. __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
  31749. uint32x4_t __ret;
  31750. __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
  31751. return __ret;
  31752. }
  31753. #else
  31754. __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
  31755. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31756. uint32x4_t __ret;
  31757. __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
  31758. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31759. return __ret;
  31760. }
  31761. #endif
  31762. #ifdef __LITTLE_ENDIAN__
  31763. __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
  31764. uint32x2_t __ret;
  31765. __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
  31766. return __ret;
  31767. }
  31768. #else
  31769. __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
  31770. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31771. uint32x2_t __ret;
  31772. __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
  31773. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31774. return __ret;
  31775. }
  31776. #endif
  31777. #endif
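/* ARMv8 directed-rounding intrinsics: vrnd* round each floating-point lane to an
   integral value, still in floating-point format.  vrnd truncates toward zero,
   vrnda rounds to nearest with ties away from zero, vrndm rounds toward minus
   infinity, vrndn rounds to nearest with ties to even, vrndp rounds toward plus
   infinity, and vrndx rounds using the current rounding mode. */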
  31778. #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  31779. #ifdef __LITTLE_ENDIAN__
  31780. __ai float32x4_t vrndq_f32(float32x4_t __p0) {
  31781. float32x4_t __ret;
  31782. __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
  31783. return __ret;
  31784. }
  31785. #else
  31786. __ai float32x4_t vrndq_f32(float32x4_t __p0) {
  31787. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31788. float32x4_t __ret;
  31789. __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
  31790. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31791. return __ret;
  31792. }
  31793. #endif
  31794. #ifdef __LITTLE_ENDIAN__
  31795. __ai float32x2_t vrnd_f32(float32x2_t __p0) {
  31796. float32x2_t __ret;
  31797. __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
  31798. return __ret;
  31799. }
  31800. #else
  31801. __ai float32x2_t vrnd_f32(float32x2_t __p0) {
  31802. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31803. float32x2_t __ret;
  31804. __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
  31805. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31806. return __ret;
  31807. }
  31808. #endif
  31809. #ifdef __LITTLE_ENDIAN__
  31810. __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
  31811. float32x4_t __ret;
  31812. __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
  31813. return __ret;
  31814. }
  31815. #else
  31816. __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
  31817. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31818. float32x4_t __ret;
  31819. __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
  31820. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31821. return __ret;
  31822. }
  31823. #endif
  31824. #ifdef __LITTLE_ENDIAN__
  31825. __ai float32x2_t vrnda_f32(float32x2_t __p0) {
  31826. float32x2_t __ret;
  31827. __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
  31828. return __ret;
  31829. }
  31830. #else
  31831. __ai float32x2_t vrnda_f32(float32x2_t __p0) {
  31832. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31833. float32x2_t __ret;
  31834. __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
  31835. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31836. return __ret;
  31837. }
  31838. #endif
  31839. #ifdef __LITTLE_ENDIAN__
  31840. __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
  31841. float32x4_t __ret;
  31842. __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
  31843. return __ret;
  31844. }
  31845. #else
  31846. __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
  31847. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31848. float32x4_t __ret;
  31849. __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
  31850. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31851. return __ret;
  31852. }
  31853. #endif
  31854. #ifdef __LITTLE_ENDIAN__
  31855. __ai float32x2_t vrndm_f32(float32x2_t __p0) {
  31856. float32x2_t __ret;
  31857. __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
  31858. return __ret;
  31859. }
  31860. #else
  31861. __ai float32x2_t vrndm_f32(float32x2_t __p0) {
  31862. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31863. float32x2_t __ret;
  31864. __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
  31865. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31866. return __ret;
  31867. }
  31868. #endif
  31869. #ifdef __LITTLE_ENDIAN__
  31870. __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
  31871. float32x4_t __ret;
  31872. __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
  31873. return __ret;
  31874. }
  31875. #else
  31876. __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
  31877. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31878. float32x4_t __ret;
  31879. __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
  31880. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31881. return __ret;
  31882. }
  31883. #endif
  31884. #ifdef __LITTLE_ENDIAN__
  31885. __ai float32x2_t vrndn_f32(float32x2_t __p0) {
  31886. float32x2_t __ret;
  31887. __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
  31888. return __ret;
  31889. }
  31890. #else
  31891. __ai float32x2_t vrndn_f32(float32x2_t __p0) {
  31892. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31893. float32x2_t __ret;
  31894. __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
  31895. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31896. return __ret;
  31897. }
  31898. #endif
  31899. #ifdef __LITTLE_ENDIAN__
  31900. __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
  31901. float32x4_t __ret;
  31902. __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
  31903. return __ret;
  31904. }
  31905. #else
  31906. __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
  31907. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31908. float32x4_t __ret;
  31909. __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
  31910. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31911. return __ret;
  31912. }
  31913. #endif
  31914. #ifdef __LITTLE_ENDIAN__
  31915. __ai float32x2_t vrndp_f32(float32x2_t __p0) {
  31916. float32x2_t __ret;
  31917. __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
  31918. return __ret;
  31919. }
  31920. #else
  31921. __ai float32x2_t vrndp_f32(float32x2_t __p0) {
  31922. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31923. float32x2_t __ret;
  31924. __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
  31925. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31926. return __ret;
  31927. }
  31928. #endif
  31929. #ifdef __LITTLE_ENDIAN__
  31930. __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
  31931. float32x4_t __ret;
  31932. __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
  31933. return __ret;
  31934. }
  31935. #else
  31936. __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
  31937. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31938. float32x4_t __ret;
  31939. __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
  31940. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31941. return __ret;
  31942. }
  31943. #endif
  31944. #ifdef __LITTLE_ENDIAN__
  31945. __ai float32x2_t vrndx_f32(float32x2_t __p0) {
  31946. float32x2_t __ret;
  31947. __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
  31948. return __ret;
  31949. }
  31950. #else
  31951. __ai float32x2_t vrndx_f32(float32x2_t __p0) {
  31952. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31953. float32x2_t __ret;
  31954. __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
  31955. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31956. return __ret;
  31957. }
  31958. #endif
  31959. #endif
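/* vmaxnm/vminnm implement the IEEE 754-2008 maxNum/minNum operations: when exactly
   one operand in a lane is a quiet NaN, the numeric operand is returned rather than
   the NaN. */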
  31960. #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
  31961. #ifdef __LITTLE_ENDIAN__
  31962. __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  31963. float32x4_t __ret;
  31964. __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  31965. return __ret;
  31966. }
  31967. #else
  31968. __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  31969. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  31970. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  31971. float32x4_t __ret;
  31972. __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  31973. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  31974. return __ret;
  31975. }
  31976. #endif
  31977. #ifdef __LITTLE_ENDIAN__
  31978. __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
  31979. float32x2_t __ret;
  31980. __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  31981. return __ret;
  31982. }
  31983. #else
  31984. __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
  31985. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  31986. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  31987. float32x2_t __ret;
  31988. __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  31989. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  31990. return __ret;
  31991. }
  31992. #endif
  31993. #ifdef __LITTLE_ENDIAN__
  31994. __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  31995. float32x4_t __ret;
  31996. __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  31997. return __ret;
  31998. }
  31999. #else
  32000. __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  32001. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  32002. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  32003. float32x4_t __ret;
  32004. __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  32005. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  32006. return __ret;
  32007. }
  32008. #endif
  32009. #ifdef __LITTLE_ENDIAN__
  32010. __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
  32011. float32x2_t __ret;
  32012. __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  32013. return __ret;
  32014. }
  32015. #else
  32016. __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
  32017. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32018. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  32019. float32x2_t __ret;
  32020. __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  32021. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32022. return __ret;
  32023. }
  32024. #endif
  32025. #endif
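/* The intrinsics below are only available on AArch64 (A64): float64 conversions with
   explicit rounding modes, followed by vreinterpret casts between vector types,
   including the float64, poly64, and poly128 types. */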
  32026. #if __ARM_ARCH >= 8 && defined(__aarch64__)
  32027. #ifdef __LITTLE_ENDIAN__
  32028. __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
  32029. int64x2_t __ret;
  32030. __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
  32031. return __ret;
  32032. }
  32033. #else
  32034. __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
  32035. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32036. int64x2_t __ret;
  32037. __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
  32038. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32039. return __ret;
  32040. }
  32041. #endif
  32042. #ifdef __LITTLE_ENDIAN__
  32043. __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
  32044. int64x1_t __ret;
  32045. __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
  32046. return __ret;
  32047. }
  32048. #else
  32049. __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
  32050. int64x1_t __ret;
  32051. __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
  32052. return __ret;
  32053. }
  32054. #endif
  32055. #ifdef __LITTLE_ENDIAN__
  32056. __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
  32057. uint64x2_t __ret;
  32058. __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
  32059. return __ret;
  32060. }
  32061. #else
  32062. __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
  32063. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32064. uint64x2_t __ret;
  32065. __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
  32066. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32067. return __ret;
  32068. }
  32069. #endif
  32070. #ifdef __LITTLE_ENDIAN__
  32071. __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
  32072. uint64x1_t __ret;
  32073. __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
  32074. return __ret;
  32075. }
  32076. #else
  32077. __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
  32078. uint64x1_t __ret;
  32079. __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
  32080. return __ret;
  32081. }
  32082. #endif
  32083. #ifdef __LITTLE_ENDIAN__
  32084. __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
  32085. int64x2_t __ret;
  32086. __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
  32087. return __ret;
  32088. }
  32089. #else
  32090. __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
  32091. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32092. int64x2_t __ret;
  32093. __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
  32094. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32095. return __ret;
  32096. }
  32097. #endif
  32098. #ifdef __LITTLE_ENDIAN__
  32099. __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
  32100. int64x1_t __ret;
  32101. __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
  32102. return __ret;
  32103. }
  32104. #else
  32105. __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
  32106. int64x1_t __ret;
  32107. __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
  32108. return __ret;
  32109. }
  32110. #endif
  32111. #ifdef __LITTLE_ENDIAN__
  32112. __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
  32113. uint64x2_t __ret;
  32114. __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
  32115. return __ret;
  32116. }
  32117. #else
  32118. __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
  32119. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32120. uint64x2_t __ret;
  32121. __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
  32122. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32123. return __ret;
  32124. }
  32125. #endif
  32126. #ifdef __LITTLE_ENDIAN__
  32127. __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
  32128. uint64x1_t __ret;
  32129. __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
  32130. return __ret;
  32131. }
  32132. #else
  32133. __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
  32134. uint64x1_t __ret;
  32135. __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
  32136. return __ret;
  32137. }
  32138. #endif
  32139. #ifdef __LITTLE_ENDIAN__
  32140. __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
  32141. int64x2_t __ret;
  32142. __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
  32143. return __ret;
  32144. }
  32145. #else
  32146. __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
  32147. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32148. int64x2_t __ret;
  32149. __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
  32150. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32151. return __ret;
  32152. }
  32153. #endif
  32154. #ifdef __LITTLE_ENDIAN__
  32155. __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
  32156. int64x1_t __ret;
  32157. __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
  32158. return __ret;
  32159. }
  32160. #else
  32161. __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
  32162. int64x1_t __ret;
  32163. __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
  32164. return __ret;
  32165. }
  32166. #endif
  32167. #ifdef __LITTLE_ENDIAN__
  32168. __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
  32169. uint64x2_t __ret;
  32170. __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
  32171. return __ret;
  32172. }
  32173. #else
  32174. __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
  32175. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32176. uint64x2_t __ret;
  32177. __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
  32178. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32179. return __ret;
  32180. }
  32181. #endif
  32182. #ifdef __LITTLE_ENDIAN__
  32183. __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
  32184. uint64x1_t __ret;
  32185. __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
  32186. return __ret;
  32187. }
  32188. #else
  32189. __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
  32190. uint64x1_t __ret;
  32191. __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
  32192. return __ret;
  32193. }
  32194. #endif
  32195. #ifdef __LITTLE_ENDIAN__
  32196. __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
  32197. int64x2_t __ret;
  32198. __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
  32199. return __ret;
  32200. }
  32201. #else
  32202. __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
  32203. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32204. int64x2_t __ret;
  32205. __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
  32206. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32207. return __ret;
  32208. }
  32209. #endif
  32210. #ifdef __LITTLE_ENDIAN__
  32211. __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
  32212. int64x1_t __ret;
  32213. __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
  32214. return __ret;
  32215. }
  32216. #else
  32217. __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
  32218. int64x1_t __ret;
  32219. __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
  32220. return __ret;
  32221. }
  32222. #endif
  32223. #ifdef __LITTLE_ENDIAN__
  32224. __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
  32225. uint64x2_t __ret;
  32226. __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
  32227. return __ret;
  32228. }
  32229. #else
  32230. __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
  32231. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  32232. uint64x2_t __ret;
  32233. __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
  32234. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  32235. return __ret;
  32236. }
  32237. #endif
  32238. #ifdef __LITTLE_ENDIAN__
  32239. __ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
  32240. uint64x1_t __ret;
  32241. __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
  32242. return __ret;
  32243. }
  32244. #else
  32245. __ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
  32246. uint64x1_t __ret;
  32247. __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
  32248. return __ret;
  32249. }
  32250. #endif
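/* vreinterpret_* reinterprets the bits of one 64-bit vector type as another of the
   same overall size.  The definitions are plain C casts, so no conversion code is
   generated, and the little- and big-endian variants are identical because no lane
   reordering is performed. */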
  32251. #ifdef __LITTLE_ENDIAN__
  32252. __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
  32253. poly8x8_t __ret;
  32254. __ret = (poly8x8_t)(__p0);
  32255. return __ret;
  32256. }
  32257. #else
  32258. __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
  32259. poly8x8_t __ret;
  32260. __ret = (poly8x8_t)(__p0);
  32261. return __ret;
  32262. }
  32263. #endif
  32264. #ifdef __LITTLE_ENDIAN__
  32265. __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
  32266. poly8x8_t __ret;
  32267. __ret = (poly8x8_t)(__p0);
  32268. return __ret;
  32269. }
  32270. #else
  32271. __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
  32272. poly8x8_t __ret;
  32273. __ret = (poly8x8_t)(__p0);
  32274. return __ret;
  32275. }
  32276. #endif
  32277. #ifdef __LITTLE_ENDIAN__
  32278. __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
  32279. poly8x8_t __ret;
  32280. __ret = (poly8x8_t)(__p0);
  32281. return __ret;
  32282. }
  32283. #else
  32284. __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
  32285. poly8x8_t __ret;
  32286. __ret = (poly8x8_t)(__p0);
  32287. return __ret;
  32288. }
  32289. #endif
  32290. #ifdef __LITTLE_ENDIAN__
  32291. __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
  32292. poly8x8_t __ret;
  32293. __ret = (poly8x8_t)(__p0);
  32294. return __ret;
  32295. }
  32296. #else
  32297. __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
  32298. poly8x8_t __ret;
  32299. __ret = (poly8x8_t)(__p0);
  32300. return __ret;
  32301. }
  32302. #endif
  32303. #ifdef __LITTLE_ENDIAN__
  32304. __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
  32305. poly8x8_t __ret;
  32306. __ret = (poly8x8_t)(__p0);
  32307. return __ret;
  32308. }
  32309. #else
  32310. __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
  32311. poly8x8_t __ret;
  32312. __ret = (poly8x8_t)(__p0);
  32313. return __ret;
  32314. }
  32315. #endif
  32316. #ifdef __LITTLE_ENDIAN__
  32317. __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
  32318. poly8x8_t __ret;
  32319. __ret = (poly8x8_t)(__p0);
  32320. return __ret;
  32321. }
  32322. #else
  32323. __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
  32324. poly8x8_t __ret;
  32325. __ret = (poly8x8_t)(__p0);
  32326. return __ret;
  32327. }
  32328. #endif
  32329. #ifdef __LITTLE_ENDIAN__
  32330. __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
  32331. poly8x8_t __ret;
  32332. __ret = (poly8x8_t)(__p0);
  32333. return __ret;
  32334. }
  32335. #else
  32336. __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
  32337. poly8x8_t __ret;
  32338. __ret = (poly8x8_t)(__p0);
  32339. return __ret;
  32340. }
  32341. #endif
  32342. #ifdef __LITTLE_ENDIAN__
  32343. __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
  32344. poly8x8_t __ret;
  32345. __ret = (poly8x8_t)(__p0);
  32346. return __ret;
  32347. }
  32348. #else
  32349. __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
  32350. poly8x8_t __ret;
  32351. __ret = (poly8x8_t)(__p0);
  32352. return __ret;
  32353. }
  32354. #endif
  32355. #ifdef __LITTLE_ENDIAN__
  32356. __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
  32357. poly8x8_t __ret;
  32358. __ret = (poly8x8_t)(__p0);
  32359. return __ret;
  32360. }
  32361. #else
  32362. __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
  32363. poly8x8_t __ret;
  32364. __ret = (poly8x8_t)(__p0);
  32365. return __ret;
  32366. }
  32367. #endif
  32368. #ifdef __LITTLE_ENDIAN__
  32369. __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
  32370. poly8x8_t __ret;
  32371. __ret = (poly8x8_t)(__p0);
  32372. return __ret;
  32373. }
  32374. #else
  32375. __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
  32376. poly8x8_t __ret;
  32377. __ret = (poly8x8_t)(__p0);
  32378. return __ret;
  32379. }
  32380. #endif
  32381. #ifdef __LITTLE_ENDIAN__
  32382. __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
  32383. poly8x8_t __ret;
  32384. __ret = (poly8x8_t)(__p0);
  32385. return __ret;
  32386. }
  32387. #else
  32388. __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
  32389. poly8x8_t __ret;
  32390. __ret = (poly8x8_t)(__p0);
  32391. return __ret;
  32392. }
  32393. #endif
  32394. #ifdef __LITTLE_ENDIAN__
  32395. __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
  32396. poly8x8_t __ret;
  32397. __ret = (poly8x8_t)(__p0);
  32398. return __ret;
  32399. }
  32400. #else
  32401. __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
  32402. poly8x8_t __ret;
  32403. __ret = (poly8x8_t)(__p0);
  32404. return __ret;
  32405. }
  32406. #endif
  32407. #ifdef __LITTLE_ENDIAN__
  32408. __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
  32409. poly8x8_t __ret;
  32410. __ret = (poly8x8_t)(__p0);
  32411. return __ret;
  32412. }
  32413. #else
  32414. __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
  32415. poly8x8_t __ret;
  32416. __ret = (poly8x8_t)(__p0);
  32417. return __ret;
  32418. }
  32419. #endif
  32420. #ifdef __LITTLE_ENDIAN__
  32421. __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
  32422. poly64x1_t __ret;
  32423. __ret = (poly64x1_t)(__p0);
  32424. return __ret;
  32425. }
  32426. #else
  32427. __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
  32428. poly64x1_t __ret;
  32429. __ret = (poly64x1_t)(__p0);
  32430. return __ret;
  32431. }
  32432. #endif
  32433. #ifdef __LITTLE_ENDIAN__
  32434. __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
  32435. poly64x1_t __ret;
  32436. __ret = (poly64x1_t)(__p0);
  32437. return __ret;
  32438. }
  32439. #else
  32440. __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
  32441. poly64x1_t __ret;
  32442. __ret = (poly64x1_t)(__p0);
  32443. return __ret;
  32444. }
  32445. #endif
  32446. #ifdef __LITTLE_ENDIAN__
  32447. __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
  32448. poly64x1_t __ret;
  32449. __ret = (poly64x1_t)(__p0);
  32450. return __ret;
  32451. }
  32452. #else
  32453. __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
  32454. poly64x1_t __ret;
  32455. __ret = (poly64x1_t)(__p0);
  32456. return __ret;
  32457. }
  32458. #endif
  32459. #ifdef __LITTLE_ENDIAN__
  32460. __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
  32461. poly64x1_t __ret;
  32462. __ret = (poly64x1_t)(__p0);
  32463. return __ret;
  32464. }
  32465. #else
  32466. __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
  32467. poly64x1_t __ret;
  32468. __ret = (poly64x1_t)(__p0);
  32469. return __ret;
  32470. }
  32471. #endif
  32472. #ifdef __LITTLE_ENDIAN__
  32473. __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
  32474. poly64x1_t __ret;
  32475. __ret = (poly64x1_t)(__p0);
  32476. return __ret;
  32477. }
  32478. #else
  32479. __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
  32480. poly64x1_t __ret;
  32481. __ret = (poly64x1_t)(__p0);
  32482. return __ret;
  32483. }
  32484. #endif
  32485. #ifdef __LITTLE_ENDIAN__
  32486. __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
  32487. poly64x1_t __ret;
  32488. __ret = (poly64x1_t)(__p0);
  32489. return __ret;
  32490. }
  32491. #else
  32492. __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
  32493. poly64x1_t __ret;
  32494. __ret = (poly64x1_t)(__p0);
  32495. return __ret;
  32496. }
  32497. #endif
  32498. #ifdef __LITTLE_ENDIAN__
  32499. __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
  32500. poly64x1_t __ret;
  32501. __ret = (poly64x1_t)(__p0);
  32502. return __ret;
  32503. }
  32504. #else
  32505. __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
  32506. poly64x1_t __ret;
  32507. __ret = (poly64x1_t)(__p0);
  32508. return __ret;
  32509. }
  32510. #endif
  32511. #ifdef __LITTLE_ENDIAN__
  32512. __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
  32513. poly64x1_t __ret;
  32514. __ret = (poly64x1_t)(__p0);
  32515. return __ret;
  32516. }
  32517. #else
  32518. __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
  32519. poly64x1_t __ret;
  32520. __ret = (poly64x1_t)(__p0);
  32521. return __ret;
  32522. }
  32523. #endif
  32524. #ifdef __LITTLE_ENDIAN__
  32525. __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
  32526. poly64x1_t __ret;
  32527. __ret = (poly64x1_t)(__p0);
  32528. return __ret;
  32529. }
  32530. #else
  32531. __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
  32532. poly64x1_t __ret;
  32533. __ret = (poly64x1_t)(__p0);
  32534. return __ret;
  32535. }
  32536. #endif
  32537. #ifdef __LITTLE_ENDIAN__
  32538. __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
  32539. poly64x1_t __ret;
  32540. __ret = (poly64x1_t)(__p0);
  32541. return __ret;
  32542. }
  32543. #else
  32544. __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
  32545. poly64x1_t __ret;
  32546. __ret = (poly64x1_t)(__p0);
  32547. return __ret;
  32548. }
  32549. #endif
  32550. #ifdef __LITTLE_ENDIAN__
  32551. __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
  32552. poly64x1_t __ret;
  32553. __ret = (poly64x1_t)(__p0);
  32554. return __ret;
  32555. }
  32556. #else
  32557. __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
  32558. poly64x1_t __ret;
  32559. __ret = (poly64x1_t)(__p0);
  32560. return __ret;
  32561. }
  32562. #endif
  32563. #ifdef __LITTLE_ENDIAN__
  32564. __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
  32565. poly64x1_t __ret;
  32566. __ret = (poly64x1_t)(__p0);
  32567. return __ret;
  32568. }
  32569. #else
  32570. __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
  32571. poly64x1_t __ret;
  32572. __ret = (poly64x1_t)(__p0);
  32573. return __ret;
  32574. }
  32575. #endif
  32576. #ifdef __LITTLE_ENDIAN__
  32577. __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
  32578. poly64x1_t __ret;
  32579. __ret = (poly64x1_t)(__p0);
  32580. return __ret;
  32581. }
  32582. #else
  32583. __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
  32584. poly64x1_t __ret;
  32585. __ret = (poly64x1_t)(__p0);
  32586. return __ret;
  32587. }
  32588. #endif
  32589. #ifdef __LITTLE_ENDIAN__
  32590. __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
  32591. poly16x4_t __ret;
  32592. __ret = (poly16x4_t)(__p0);
  32593. return __ret;
  32594. }
  32595. #else
  32596. __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
  32597. poly16x4_t __ret;
  32598. __ret = (poly16x4_t)(__p0);
  32599. return __ret;
  32600. }
  32601. #endif
  32602. #ifdef __LITTLE_ENDIAN__
  32603. __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
  32604. poly16x4_t __ret;
  32605. __ret = (poly16x4_t)(__p0);
  32606. return __ret;
  32607. }
  32608. #else
  32609. __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
  32610. poly16x4_t __ret;
  32611. __ret = (poly16x4_t)(__p0);
  32612. return __ret;
  32613. }
  32614. #endif
  32615. #ifdef __LITTLE_ENDIAN__
  32616. __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
  32617. poly16x4_t __ret;
  32618. __ret = (poly16x4_t)(__p0);
  32619. return __ret;
  32620. }
  32621. #else
  32622. __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
  32623. poly16x4_t __ret;
  32624. __ret = (poly16x4_t)(__p0);
  32625. return __ret;
  32626. }
  32627. #endif
  32628. #ifdef __LITTLE_ENDIAN__
  32629. __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
  32630. poly16x4_t __ret;
  32631. __ret = (poly16x4_t)(__p0);
  32632. return __ret;
  32633. }
  32634. #else
  32635. __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
  32636. poly16x4_t __ret;
  32637. __ret = (poly16x4_t)(__p0);
  32638. return __ret;
  32639. }
  32640. #endif
  32641. #ifdef __LITTLE_ENDIAN__
  32642. __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
  32643. poly16x4_t __ret;
  32644. __ret = (poly16x4_t)(__p0);
  32645. return __ret;
  32646. }
  32647. #else
  32648. __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
  32649. poly16x4_t __ret;
  32650. __ret = (poly16x4_t)(__p0);
  32651. return __ret;
  32652. }
  32653. #endif
  32654. #ifdef __LITTLE_ENDIAN__
  32655. __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
  32656. poly16x4_t __ret;
  32657. __ret = (poly16x4_t)(__p0);
  32658. return __ret;
  32659. }
  32660. #else
  32661. __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
  32662. poly16x4_t __ret;
  32663. __ret = (poly16x4_t)(__p0);
  32664. return __ret;
  32665. }
  32666. #endif
  32667. #ifdef __LITTLE_ENDIAN__
  32668. __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
  32669. poly16x4_t __ret;
  32670. __ret = (poly16x4_t)(__p0);
  32671. return __ret;
  32672. }
  32673. #else
  32674. __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
  32675. poly16x4_t __ret;
  32676. __ret = (poly16x4_t)(__p0);
  32677. return __ret;
  32678. }
  32679. #endif
  32680. #ifdef __LITTLE_ENDIAN__
  32681. __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
  32682. poly16x4_t __ret;
  32683. __ret = (poly16x4_t)(__p0);
  32684. return __ret;
  32685. }
  32686. #else
  32687. __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
  32688. poly16x4_t __ret;
  32689. __ret = (poly16x4_t)(__p0);
  32690. return __ret;
  32691. }
  32692. #endif
  32693. #ifdef __LITTLE_ENDIAN__
  32694. __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
  32695. poly16x4_t __ret;
  32696. __ret = (poly16x4_t)(__p0);
  32697. return __ret;
  32698. }
  32699. #else
  32700. __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
  32701. poly16x4_t __ret;
  32702. __ret = (poly16x4_t)(__p0);
  32703. return __ret;
  32704. }
  32705. #endif
  32706. #ifdef __LITTLE_ENDIAN__
  32707. __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
  32708. poly16x4_t __ret;
  32709. __ret = (poly16x4_t)(__p0);
  32710. return __ret;
  32711. }
  32712. #else
  32713. __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
  32714. poly16x4_t __ret;
  32715. __ret = (poly16x4_t)(__p0);
  32716. return __ret;
  32717. }
  32718. #endif
  32719. #ifdef __LITTLE_ENDIAN__
  32720. __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
  32721. poly16x4_t __ret;
  32722. __ret = (poly16x4_t)(__p0);
  32723. return __ret;
  32724. }
  32725. #else
  32726. __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
  32727. poly16x4_t __ret;
  32728. __ret = (poly16x4_t)(__p0);
  32729. return __ret;
  32730. }
  32731. #endif
  32732. #ifdef __LITTLE_ENDIAN__
  32733. __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
  32734. poly16x4_t __ret;
  32735. __ret = (poly16x4_t)(__p0);
  32736. return __ret;
  32737. }
  32738. #else
  32739. __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
  32740. poly16x4_t __ret;
  32741. __ret = (poly16x4_t)(__p0);
  32742. return __ret;
  32743. }
  32744. #endif
  32745. #ifdef __LITTLE_ENDIAN__
  32746. __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
  32747. poly16x4_t __ret;
  32748. __ret = (poly16x4_t)(__p0);
  32749. return __ret;
  32750. }
  32751. #else
  32752. __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
  32753. poly16x4_t __ret;
  32754. __ret = (poly16x4_t)(__p0);
  32755. return __ret;
  32756. }
  32757. #endif
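/* The vreinterpretq_* forms are the 128-bit (Q register) counterparts of the casts
   above, including reinterpretations to and from the poly128_t scalar type. */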
  32758. #ifdef __LITTLE_ENDIAN__
  32759. __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
  32760. poly8x16_t __ret;
  32761. __ret = (poly8x16_t)(__p0);
  32762. return __ret;
  32763. }
  32764. #else
  32765. __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
  32766. poly8x16_t __ret;
  32767. __ret = (poly8x16_t)(__p0);
  32768. return __ret;
  32769. }
  32770. #endif
  32771. #ifdef __LITTLE_ENDIAN__
  32772. __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
  32773. poly8x16_t __ret;
  32774. __ret = (poly8x16_t)(__p0);
  32775. return __ret;
  32776. }
  32777. #else
  32778. __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
  32779. poly8x16_t __ret;
  32780. __ret = (poly8x16_t)(__p0);
  32781. return __ret;
  32782. }
  32783. #endif
  32784. #ifdef __LITTLE_ENDIAN__
  32785. __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
  32786. poly8x16_t __ret;
  32787. __ret = (poly8x16_t)(__p0);
  32788. return __ret;
  32789. }
  32790. #else
  32791. __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
  32792. poly8x16_t __ret;
  32793. __ret = (poly8x16_t)(__p0);
  32794. return __ret;
  32795. }
  32796. #endif
  32797. #ifdef __LITTLE_ENDIAN__
  32798. __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
  32799. poly8x16_t __ret;
  32800. __ret = (poly8x16_t)(__p0);
  32801. return __ret;
  32802. }
  32803. #else
  32804. __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
  32805. poly8x16_t __ret;
  32806. __ret = (poly8x16_t)(__p0);
  32807. return __ret;
  32808. }
  32809. #endif
  32810. #ifdef __LITTLE_ENDIAN__
  32811. __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
  32812. poly8x16_t __ret;
  32813. __ret = (poly8x16_t)(__p0);
  32814. return __ret;
  32815. }
  32816. #else
  32817. __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
  32818. poly8x16_t __ret;
  32819. __ret = (poly8x16_t)(__p0);
  32820. return __ret;
  32821. }
  32822. #endif
  32823. #ifdef __LITTLE_ENDIAN__
  32824. __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
  32825. poly8x16_t __ret;
  32826. __ret = (poly8x16_t)(__p0);
  32827. return __ret;
  32828. }
  32829. #else
  32830. __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
  32831. poly8x16_t __ret;
  32832. __ret = (poly8x16_t)(__p0);
  32833. return __ret;
  32834. }
  32835. #endif
  32836. #ifdef __LITTLE_ENDIAN__
  32837. __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
  32838. poly8x16_t __ret;
  32839. __ret = (poly8x16_t)(__p0);
  32840. return __ret;
  32841. }
  32842. #else
  32843. __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
  32844. poly8x16_t __ret;
  32845. __ret = (poly8x16_t)(__p0);
  32846. return __ret;
  32847. }
  32848. #endif
  32849. #ifdef __LITTLE_ENDIAN__
  32850. __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
  32851. poly8x16_t __ret;
  32852. __ret = (poly8x16_t)(__p0);
  32853. return __ret;
  32854. }
  32855. #else
  32856. __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
  32857. poly8x16_t __ret;
  32858. __ret = (poly8x16_t)(__p0);
  32859. return __ret;
  32860. }
  32861. #endif
  32862. #ifdef __LITTLE_ENDIAN__
  32863. __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
  32864. poly8x16_t __ret;
  32865. __ret = (poly8x16_t)(__p0);
  32866. return __ret;
  32867. }
  32868. #else
  32869. __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
  32870. poly8x16_t __ret;
  32871. __ret = (poly8x16_t)(__p0);
  32872. return __ret;
  32873. }
  32874. #endif
  32875. #ifdef __LITTLE_ENDIAN__
  32876. __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
  32877. poly8x16_t __ret;
  32878. __ret = (poly8x16_t)(__p0);
  32879. return __ret;
  32880. }
  32881. #else
  32882. __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
  32883. poly8x16_t __ret;
  32884. __ret = (poly8x16_t)(__p0);
  32885. return __ret;
  32886. }
  32887. #endif
  32888. #ifdef __LITTLE_ENDIAN__
  32889. __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
  32890. poly8x16_t __ret;
  32891. __ret = (poly8x16_t)(__p0);
  32892. return __ret;
  32893. }
  32894. #else
  32895. __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
  32896. poly8x16_t __ret;
  32897. __ret = (poly8x16_t)(__p0);
  32898. return __ret;
  32899. }
  32900. #endif
  32901. #ifdef __LITTLE_ENDIAN__
  32902. __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
  32903. poly8x16_t __ret;
  32904. __ret = (poly8x16_t)(__p0);
  32905. return __ret;
  32906. }
  32907. #else
  32908. __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
  32909. poly8x16_t __ret;
  32910. __ret = (poly8x16_t)(__p0);
  32911. return __ret;
  32912. }
  32913. #endif
  32914. #ifdef __LITTLE_ENDIAN__
  32915. __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
  32916. poly8x16_t __ret;
  32917. __ret = (poly8x16_t)(__p0);
  32918. return __ret;
  32919. }
  32920. #else
  32921. __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
  32922. poly8x16_t __ret;
  32923. __ret = (poly8x16_t)(__p0);
  32924. return __ret;
  32925. }
  32926. #endif
  32927. #ifdef __LITTLE_ENDIAN__
  32928. __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
  32929. poly8x16_t __ret;
  32930. __ret = (poly8x16_t)(__p0);
  32931. return __ret;
  32932. }
  32933. #else
  32934. __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
  32935. poly8x16_t __ret;
  32936. __ret = (poly8x16_t)(__p0);
  32937. return __ret;
  32938. }
  32939. #endif
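/* poly128_t is a 128-bit scalar, so vreinterpretq_p128_* views the whole 128-bit
   vector as a single polynomial value. */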
  32940. #ifdef __LITTLE_ENDIAN__
  32941. __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
  32942. poly128_t __ret;
  32943. __ret = (poly128_t)(__p0);
  32944. return __ret;
  32945. }
  32946. #else
  32947. __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
  32948. poly128_t __ret;
  32949. __ret = (poly128_t)(__p0);
  32950. return __ret;
  32951. }
  32952. #endif
  32953. #ifdef __LITTLE_ENDIAN__
  32954. __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
  32955. poly128_t __ret;
  32956. __ret = (poly128_t)(__p0);
  32957. return __ret;
  32958. }
  32959. #else
  32960. __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
  32961. poly128_t __ret;
  32962. __ret = (poly128_t)(__p0);
  32963. return __ret;
  32964. }
  32965. #endif
  32966. #ifdef __LITTLE_ENDIAN__
  32967. __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
  32968. poly128_t __ret;
  32969. __ret = (poly128_t)(__p0);
  32970. return __ret;
  32971. }
  32972. #else
  32973. __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
  32974. poly128_t __ret;
  32975. __ret = (poly128_t)(__p0);
  32976. return __ret;
  32977. }
  32978. #endif
  32979. #ifdef __LITTLE_ENDIAN__
  32980. __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
  32981. poly128_t __ret;
  32982. __ret = (poly128_t)(__p0);
  32983. return __ret;
  32984. }
  32985. #else
  32986. __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
  32987. poly128_t __ret;
  32988. __ret = (poly128_t)(__p0);
  32989. return __ret;
  32990. }
  32991. #endif
  32992. #ifdef __LITTLE_ENDIAN__
  32993. __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
  32994. poly128_t __ret;
  32995. __ret = (poly128_t)(__p0);
  32996. return __ret;
  32997. }
  32998. #else
  32999. __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
  33000. poly128_t __ret;
  33001. __ret = (poly128_t)(__p0);
  33002. return __ret;
  33003. }
  33004. #endif
  33005. #ifdef __LITTLE_ENDIAN__
  33006. __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
  33007. poly128_t __ret;
  33008. __ret = (poly128_t)(__p0);
  33009. return __ret;
  33010. }
  33011. #else
  33012. __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
  33013. poly128_t __ret;
  33014. __ret = (poly128_t)(__p0);
  33015. return __ret;
  33016. }
  33017. #endif
  33018. #ifdef __LITTLE_ENDIAN__
  33019. __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
  33020. poly128_t __ret;
  33021. __ret = (poly128_t)(__p0);
  33022. return __ret;
  33023. }
  33024. #else
  33025. __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
  33026. poly128_t __ret;
  33027. __ret = (poly128_t)(__p0);
  33028. return __ret;
  33029. }
  33030. #endif
  33031. #ifdef __LITTLE_ENDIAN__
  33032. __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
  33033. poly128_t __ret;
  33034. __ret = (poly128_t)(__p0);
  33035. return __ret;
  33036. }
  33037. #else
  33038. __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
  33039. poly128_t __ret;
  33040. __ret = (poly128_t)(__p0);
  33041. return __ret;
  33042. }
  33043. #endif
  33044. #ifdef __LITTLE_ENDIAN__
  33045. __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
  33046. poly128_t __ret;
  33047. __ret = (poly128_t)(__p0);
  33048. return __ret;
  33049. }
  33050. #else
  33051. __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
  33052. poly128_t __ret;
  33053. __ret = (poly128_t)(__p0);
  33054. return __ret;
  33055. }
  33056. #endif
  33057. #ifdef __LITTLE_ENDIAN__
  33058. __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
  33059. poly128_t __ret;
  33060. __ret = (poly128_t)(__p0);
  33061. return __ret;
  33062. }
  33063. #else
  33064. __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
  33065. poly128_t __ret;
  33066. __ret = (poly128_t)(__p0);
  33067. return __ret;
  33068. }
  33069. #endif
  33070. #ifdef __LITTLE_ENDIAN__
  33071. __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
  33072. poly128_t __ret;
  33073. __ret = (poly128_t)(__p0);
  33074. return __ret;
  33075. }
  33076. #else
  33077. __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
  33078. poly128_t __ret;
  33079. __ret = (poly128_t)(__p0);
  33080. return __ret;
  33081. }
  33082. #endif
  33083. #ifdef __LITTLE_ENDIAN__
  33084. __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
  33085. poly128_t __ret;
  33086. __ret = (poly128_t)(__p0);
  33087. return __ret;
  33088. }
  33089. #else
  33090. __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
  33091. poly128_t __ret;
  33092. __ret = (poly128_t)(__p0);
  33093. return __ret;
  33094. }
  33095. #endif
  33096. #ifdef __LITTLE_ENDIAN__
  33097. __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
  33098. poly128_t __ret;
  33099. __ret = (poly128_t)(__p0);
  33100. return __ret;
  33101. }
  33102. #else
  33103. __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
  33104. poly128_t __ret;
  33105. __ret = (poly128_t)(__p0);
  33106. return __ret;
  33107. }
  33108. #endif
  33109. #ifdef __LITTLE_ENDIAN__
  33110. __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
  33111. poly128_t __ret;
  33112. __ret = (poly128_t)(__p0);
  33113. return __ret;
  33114. }
  33115. #else
  33116. __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
  33117. poly128_t __ret;
  33118. __ret = (poly128_t)(__p0);
  33119. return __ret;
  33120. }
  33121. #endif
  33122. #ifdef __LITTLE_ENDIAN__
  33123. __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
  33124. poly64x2_t __ret;
  33125. __ret = (poly64x2_t)(__p0);
  33126. return __ret;
  33127. }
  33128. #else
  33129. __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
  33130. poly64x2_t __ret;
  33131. __ret = (poly64x2_t)(__p0);
  33132. return __ret;
  33133. }
  33134. #endif
  33135. #ifdef __LITTLE_ENDIAN__
  33136. __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
  33137. poly64x2_t __ret;
  33138. __ret = (poly64x2_t)(__p0);
  33139. return __ret;
  33140. }
  33141. #else
  33142. __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
  33143. poly64x2_t __ret;
  33144. __ret = (poly64x2_t)(__p0);
  33145. return __ret;
  33146. }
  33147. #endif
  33148. #ifdef __LITTLE_ENDIAN__
  33149. __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
  33150. poly64x2_t __ret;
  33151. __ret = (poly64x2_t)(__p0);
  33152. return __ret;
  33153. }
  33154. #else
  33155. __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
  33156. poly64x2_t __ret;
  33157. __ret = (poly64x2_t)(__p0);
  33158. return __ret;
  33159. }
  33160. #endif
  33161. #ifdef __LITTLE_ENDIAN__
  33162. __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
  33163. poly64x2_t __ret;
  33164. __ret = (poly64x2_t)(__p0);
  33165. return __ret;
  33166. }
  33167. #else
  33168. __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
  33169. poly64x2_t __ret;
  33170. __ret = (poly64x2_t)(__p0);
  33171. return __ret;
  33172. }
  33173. #endif
  33174. #ifdef __LITTLE_ENDIAN__
  33175. __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
  33176. poly64x2_t __ret;
  33177. __ret = (poly64x2_t)(__p0);
  33178. return __ret;
  33179. }
  33180. #else
  33181. __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
  33182. poly64x2_t __ret;
  33183. __ret = (poly64x2_t)(__p0);
  33184. return __ret;
  33185. }
  33186. #endif
  33187. #ifdef __LITTLE_ENDIAN__
  33188. __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
  33189. poly64x2_t __ret;
  33190. __ret = (poly64x2_t)(__p0);
  33191. return __ret;
  33192. }
  33193. #else
  33194. __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
  33195. poly64x2_t __ret;
  33196. __ret = (poly64x2_t)(__p0);
  33197. return __ret;
  33198. }
  33199. #endif
  33200. #ifdef __LITTLE_ENDIAN__
  33201. __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
  33202. poly64x2_t __ret;
  33203. __ret = (poly64x2_t)(__p0);
  33204. return __ret;
  33205. }
  33206. #else
  33207. __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
  33208. poly64x2_t __ret;
  33209. __ret = (poly64x2_t)(__p0);
  33210. return __ret;
  33211. }
  33212. #endif
  33213. #ifdef __LITTLE_ENDIAN__
  33214. __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
  33215. poly64x2_t __ret;
  33216. __ret = (poly64x2_t)(__p0);
  33217. return __ret;
  33218. }
  33219. #else
  33220. __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
  33221. poly64x2_t __ret;
  33222. __ret = (poly64x2_t)(__p0);
  33223. return __ret;
  33224. }
  33225. #endif
  33226. #ifdef __LITTLE_ENDIAN__
  33227. __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
  33228. poly64x2_t __ret;
  33229. __ret = (poly64x2_t)(__p0);
  33230. return __ret;
  33231. }
  33232. #else
  33233. __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
  33234. poly64x2_t __ret;
  33235. __ret = (poly64x2_t)(__p0);
  33236. return __ret;
  33237. }
  33238. #endif
  33239. #ifdef __LITTLE_ENDIAN__
  33240. __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
  33241. poly64x2_t __ret;
  33242. __ret = (poly64x2_t)(__p0);
  33243. return __ret;
  33244. }
  33245. #else
  33246. __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
  33247. poly64x2_t __ret;
  33248. __ret = (poly64x2_t)(__p0);
  33249. return __ret;
  33250. }
  33251. #endif
  33252. #ifdef __LITTLE_ENDIAN__
  33253. __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
  33254. poly64x2_t __ret;
  33255. __ret = (poly64x2_t)(__p0);
  33256. return __ret;
  33257. }
  33258. #else
  33259. __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
  33260. poly64x2_t __ret;
  33261. __ret = (poly64x2_t)(__p0);
  33262. return __ret;
  33263. }
  33264. #endif
  33265. #ifdef __LITTLE_ENDIAN__
  33266. __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
  33267. poly64x2_t __ret;
  33268. __ret = (poly64x2_t)(__p0);
  33269. return __ret;
  33270. }
  33271. #else
  33272. __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
  33273. poly64x2_t __ret;
  33274. __ret = (poly64x2_t)(__p0);
  33275. return __ret;
  33276. }
  33277. #endif
  33278. #ifdef __LITTLE_ENDIAN__
  33279. __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
  33280. poly64x2_t __ret;
  33281. __ret = (poly64x2_t)(__p0);
  33282. return __ret;
  33283. }
  33284. #else
  33285. __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
  33286. poly64x2_t __ret;
  33287. __ret = (poly64x2_t)(__p0);
  33288. return __ret;
  33289. }
  33290. #endif
  33291. #ifdef __LITTLE_ENDIAN__
  33292. __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
  33293. poly64x2_t __ret;
  33294. __ret = (poly64x2_t)(__p0);
  33295. return __ret;
  33296. }
  33297. #else
  33298. __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
  33299. poly64x2_t __ret;
  33300. __ret = (poly64x2_t)(__p0);
  33301. return __ret;
  33302. }
  33303. #endif
  33304. #ifdef __LITTLE_ENDIAN__
  33305. __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
  33306. poly16x8_t __ret;
  33307. __ret = (poly16x8_t)(__p0);
  33308. return __ret;
  33309. }
  33310. #else
  33311. __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
  33312. poly16x8_t __ret;
  33313. __ret = (poly16x8_t)(__p0);
  33314. return __ret;
  33315. }
  33316. #endif
  33317. #ifdef __LITTLE_ENDIAN__
  33318. __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
  33319. poly16x8_t __ret;
  33320. __ret = (poly16x8_t)(__p0);
  33321. return __ret;
  33322. }
  33323. #else
  33324. __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
  33325. poly16x8_t __ret;
  33326. __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#else
__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
  poly16x8_t __ret;
  __ret = (poly16x8_t)(__p0);
  return __ret;
}
#endif
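/*
 * Reinterpret casts to uint8x16_t follow. Each vreinterpretq_* intrinsic
 * simply relabels the 128-bit register contents as another vector type; the
 * bit pattern is not modified, as the plain C casts in the definitions show.
 * A minimal usage sketch (illustrative values only):
 *
 *   uint32x4_t bits = vdupq_n_u32(0x3f800000u);
 *   float32x4_t ones = vreinterpretq_f32_u32(bits);  // every lane now reads as 1.0f
 */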
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#else
__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
  uint8x16_t __ret;
  __ret = (uint8x16_t)(__p0);
  return __ret;
}
#endif
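/* Reinterpret 128-bit vectors as uint32x4_t (bit pattern preserved). */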
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#else
__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
  uint32x4_t __ret;
  __ret = (uint32x4_t)(__p0);
  return __ret;
}
#endif
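/* Reinterpret 128-bit vectors as uint64x2_t (bit pattern preserved). */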
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#else
__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
  uint64x2_t __ret;
  __ret = (uint64x2_t)(__p0);
  return __ret;
}
#endif
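/* Reinterpret 128-bit vectors as uint16x8_t (bit pattern preserved). */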
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#else
__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
  uint16x8_t __ret;
  __ret = (uint16x8_t)(__p0);
  return __ret;
}
#endif
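/* Reinterpret 128-bit vectors as int8x16_t (bit pattern preserved). */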
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#else
__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
  int8x16_t __ret;
  __ret = (int8x16_t)(__p0);
  return __ret;
}
#endif
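/* Reinterpret 128-bit vectors as float64x2_t (bit pattern preserved, no numeric conversion). */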
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#else
__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
  float64x2_t __ret;
  __ret = (float64x2_t)(__p0);
  return __ret;
}
#endif
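/* Reinterpret 128-bit vectors as float32x4_t (bit pattern preserved, no numeric conversion). */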
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#else
__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
  float32x4_t __ret;
  __ret = (float32x4_t)(__p0);
  return __ret;
}
#endif
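/* Reinterpret 128-bit vectors as float16x8_t (bit pattern preserved, no numeric conversion). */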
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#else
__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
  float16x8_t __ret;
  __ret = (float16x8_t)(__p0);
  return __ret;
}
#endif
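/* Reinterpret 128-bit vectors as int32x4_t (bit pattern preserved). */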
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#else
__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
  int32x4_t __ret;
  __ret = (int32x4_t)(__p0);
  return __ret;
}
#endif
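/* Reinterpret 128-bit vectors as int64x2_t (bit pattern preserved). */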
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#else
__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
  int64x2_t __ret;
  __ret = (int64x2_t)(__p0);
  return __ret;
}
#endif
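/* Reinterpret 128-bit vectors as int16x8_t (bit pattern preserved). */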
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#else
__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
  int16x8_t __ret;
  __ret = (int16x8_t)(__p0);
  return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
  int16x8_t __ret;
  35452. __ret = (int16x8_t)(__p0);
  35453. return __ret;
  35454. }
  35455. #else
  35456. __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
  35457. int16x8_t __ret;
  35458. __ret = (int16x8_t)(__p0);
  35459. return __ret;
  35460. }
  35461. #endif
  35462. #ifdef __LITTLE_ENDIAN__
  35463. __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
  35464. int16x8_t __ret;
  35465. __ret = (int16x8_t)(__p0);
  35466. return __ret;
  35467. }
  35468. #else
  35469. __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
  35470. int16x8_t __ret;
  35471. __ret = (int16x8_t)(__p0);
  35472. return __ret;
  35473. }
  35474. #endif
  35475. #ifdef __LITTLE_ENDIAN__
  35476. __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
  35477. int16x8_t __ret;
  35478. __ret = (int16x8_t)(__p0);
  35479. return __ret;
  35480. }
  35481. #else
  35482. __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
  35483. int16x8_t __ret;
  35484. __ret = (int16x8_t)(__p0);
  35485. return __ret;
  35486. }
  35487. #endif
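/* From here on the header switches from the 128-bit "q" forms to the 64-bit
 * vreinterpret_* forms, which re-type D-register vectors (uint8x8_t, int32x2_t,
 * float64x1_t, ...). Note that poly128_t sources appear only in the q forms
 * above, since a poly128_t does not fit in a 64-bit vector. */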
  35488. #ifdef __LITTLE_ENDIAN__
  35489. __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
  35490. uint8x8_t __ret;
  35491. __ret = (uint8x8_t)(__p0);
  35492. return __ret;
  35493. }
  35494. #else
  35495. __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
  35496. uint8x8_t __ret;
  35497. __ret = (uint8x8_t)(__p0);
  35498. return __ret;
  35499. }
  35500. #endif
  35501. #ifdef __LITTLE_ENDIAN__
  35502. __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
  35503. uint8x8_t __ret;
  35504. __ret = (uint8x8_t)(__p0);
  35505. return __ret;
  35506. }
  35507. #else
  35508. __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
  35509. uint8x8_t __ret;
  35510. __ret = (uint8x8_t)(__p0);
  35511. return __ret;
  35512. }
  35513. #endif
  35514. #ifdef __LITTLE_ENDIAN__
  35515. __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
  35516. uint8x8_t __ret;
  35517. __ret = (uint8x8_t)(__p0);
  35518. return __ret;
  35519. }
  35520. #else
  35521. __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
  35522. uint8x8_t __ret;
  35523. __ret = (uint8x8_t)(__p0);
  35524. return __ret;
  35525. }
  35526. #endif
  35527. #ifdef __LITTLE_ENDIAN__
  35528. __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
  35529. uint8x8_t __ret;
  35530. __ret = (uint8x8_t)(__p0);
  35531. return __ret;
  35532. }
  35533. #else
  35534. __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
  35535. uint8x8_t __ret;
  35536. __ret = (uint8x8_t)(__p0);
  35537. return __ret;
  35538. }
  35539. #endif
  35540. #ifdef __LITTLE_ENDIAN__
  35541. __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
  35542. uint8x8_t __ret;
  35543. __ret = (uint8x8_t)(__p0);
  35544. return __ret;
  35545. }
  35546. #else
  35547. __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
  35548. uint8x8_t __ret;
  35549. __ret = (uint8x8_t)(__p0);
  35550. return __ret;
  35551. }
  35552. #endif
  35553. #ifdef __LITTLE_ENDIAN__
  35554. __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
  35555. uint8x8_t __ret;
  35556. __ret = (uint8x8_t)(__p0);
  35557. return __ret;
  35558. }
  35559. #else
  35560. __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
  35561. uint8x8_t __ret;
  35562. __ret = (uint8x8_t)(__p0);
  35563. return __ret;
  35564. }
  35565. #endif
  35566. #ifdef __LITTLE_ENDIAN__
  35567. __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
  35568. uint8x8_t __ret;
  35569. __ret = (uint8x8_t)(__p0);
  35570. return __ret;
  35571. }
  35572. #else
  35573. __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
  35574. uint8x8_t __ret;
  35575. __ret = (uint8x8_t)(__p0);
  35576. return __ret;
  35577. }
  35578. #endif
  35579. #ifdef __LITTLE_ENDIAN__
  35580. __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
  35581. uint8x8_t __ret;
  35582. __ret = (uint8x8_t)(__p0);
  35583. return __ret;
  35584. }
  35585. #else
  35586. __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
  35587. uint8x8_t __ret;
  35588. __ret = (uint8x8_t)(__p0);
  35589. return __ret;
  35590. }
  35591. #endif
  35592. #ifdef __LITTLE_ENDIAN__
  35593. __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
  35594. uint8x8_t __ret;
  35595. __ret = (uint8x8_t)(__p0);
  35596. return __ret;
  35597. }
  35598. #else
  35599. __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
  35600. uint8x8_t __ret;
  35601. __ret = (uint8x8_t)(__p0);
  35602. return __ret;
  35603. }
  35604. #endif
  35605. #ifdef __LITTLE_ENDIAN__
  35606. __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
  35607. uint8x8_t __ret;
  35608. __ret = (uint8x8_t)(__p0);
  35609. return __ret;
  35610. }
  35611. #else
  35612. __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
  35613. uint8x8_t __ret;
  35614. __ret = (uint8x8_t)(__p0);
  35615. return __ret;
  35616. }
  35617. #endif
  35618. #ifdef __LITTLE_ENDIAN__
  35619. __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
  35620. uint8x8_t __ret;
  35621. __ret = (uint8x8_t)(__p0);
  35622. return __ret;
  35623. }
  35624. #else
  35625. __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
  35626. uint8x8_t __ret;
  35627. __ret = (uint8x8_t)(__p0);
  35628. return __ret;
  35629. }
  35630. #endif
  35631. #ifdef __LITTLE_ENDIAN__
  35632. __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
  35633. uint8x8_t __ret;
  35634. __ret = (uint8x8_t)(__p0);
  35635. return __ret;
  35636. }
  35637. #else
  35638. __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
  35639. uint8x8_t __ret;
  35640. __ret = (uint8x8_t)(__p0);
  35641. return __ret;
  35642. }
  35643. #endif
  35644. #ifdef __LITTLE_ENDIAN__
  35645. __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
  35646. uint8x8_t __ret;
  35647. __ret = (uint8x8_t)(__p0);
  35648. return __ret;
  35649. }
  35650. #else
  35651. __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
  35652. uint8x8_t __ret;
  35653. __ret = (uint8x8_t)(__p0);
  35654. return __ret;
  35655. }
  35656. #endif
  35657. #ifdef __LITTLE_ENDIAN__
  35658. __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
  35659. uint32x2_t __ret;
  35660. __ret = (uint32x2_t)(__p0);
  35661. return __ret;
  35662. }
  35663. #else
  35664. __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
  35665. uint32x2_t __ret;
  35666. __ret = (uint32x2_t)(__p0);
  35667. return __ret;
  35668. }
  35669. #endif
  35670. #ifdef __LITTLE_ENDIAN__
  35671. __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
  35672. uint32x2_t __ret;
  35673. __ret = (uint32x2_t)(__p0);
  35674. return __ret;
  35675. }
  35676. #else
  35677. __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
  35678. uint32x2_t __ret;
  35679. __ret = (uint32x2_t)(__p0);
  35680. return __ret;
  35681. }
  35682. #endif
  35683. #ifdef __LITTLE_ENDIAN__
  35684. __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
  35685. uint32x2_t __ret;
  35686. __ret = (uint32x2_t)(__p0);
  35687. return __ret;
  35688. }
  35689. #else
  35690. __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
  35691. uint32x2_t __ret;
  35692. __ret = (uint32x2_t)(__p0);
  35693. return __ret;
  35694. }
  35695. #endif
  35696. #ifdef __LITTLE_ENDIAN__
  35697. __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
  35698. uint32x2_t __ret;
  35699. __ret = (uint32x2_t)(__p0);
  35700. return __ret;
  35701. }
  35702. #else
  35703. __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
  35704. uint32x2_t __ret;
  35705. __ret = (uint32x2_t)(__p0);
  35706. return __ret;
  35707. }
  35708. #endif
  35709. #ifdef __LITTLE_ENDIAN__
  35710. __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
  35711. uint32x2_t __ret;
  35712. __ret = (uint32x2_t)(__p0);
  35713. return __ret;
  35714. }
  35715. #else
  35716. __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
  35717. uint32x2_t __ret;
  35718. __ret = (uint32x2_t)(__p0);
  35719. return __ret;
  35720. }
  35721. #endif
  35722. #ifdef __LITTLE_ENDIAN__
  35723. __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
  35724. uint32x2_t __ret;
  35725. __ret = (uint32x2_t)(__p0);
  35726. return __ret;
  35727. }
  35728. #else
  35729. __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
  35730. uint32x2_t __ret;
  35731. __ret = (uint32x2_t)(__p0);
  35732. return __ret;
  35733. }
  35734. #endif
  35735. #ifdef __LITTLE_ENDIAN__
  35736. __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
  35737. uint32x2_t __ret;
  35738. __ret = (uint32x2_t)(__p0);
  35739. return __ret;
  35740. }
  35741. #else
  35742. __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
  35743. uint32x2_t __ret;
  35744. __ret = (uint32x2_t)(__p0);
  35745. return __ret;
  35746. }
  35747. #endif
  35748. #ifdef __LITTLE_ENDIAN__
  35749. __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
  35750. uint32x2_t __ret;
  35751. __ret = (uint32x2_t)(__p0);
  35752. return __ret;
  35753. }
  35754. #else
  35755. __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
  35756. uint32x2_t __ret;
  35757. __ret = (uint32x2_t)(__p0);
  35758. return __ret;
  35759. }
  35760. #endif
  35761. #ifdef __LITTLE_ENDIAN__
  35762. __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
  35763. uint32x2_t __ret;
  35764. __ret = (uint32x2_t)(__p0);
  35765. return __ret;
  35766. }
  35767. #else
  35768. __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
  35769. uint32x2_t __ret;
  35770. __ret = (uint32x2_t)(__p0);
  35771. return __ret;
  35772. }
  35773. #endif
  35774. #ifdef __LITTLE_ENDIAN__
  35775. __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
  35776. uint32x2_t __ret;
  35777. __ret = (uint32x2_t)(__p0);
  35778. return __ret;
  35779. }
  35780. #else
  35781. __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
  35782. uint32x2_t __ret;
  35783. __ret = (uint32x2_t)(__p0);
  35784. return __ret;
  35785. }
  35786. #endif
  35787. #ifdef __LITTLE_ENDIAN__
  35788. __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
  35789. uint32x2_t __ret;
  35790. __ret = (uint32x2_t)(__p0);
  35791. return __ret;
  35792. }
  35793. #else
  35794. __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
  35795. uint32x2_t __ret;
  35796. __ret = (uint32x2_t)(__p0);
  35797. return __ret;
  35798. }
  35799. #endif
  35800. #ifdef __LITTLE_ENDIAN__
  35801. __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
  35802. uint32x2_t __ret;
  35803. __ret = (uint32x2_t)(__p0);
  35804. return __ret;
  35805. }
  35806. #else
  35807. __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
  35808. uint32x2_t __ret;
  35809. __ret = (uint32x2_t)(__p0);
  35810. return __ret;
  35811. }
  35812. #endif
  35813. #ifdef __LITTLE_ENDIAN__
  35814. __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
  35815. uint32x2_t __ret;
  35816. __ret = (uint32x2_t)(__p0);
  35817. return __ret;
  35818. }
  35819. #else
  35820. __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
  35821. uint32x2_t __ret;
  35822. __ret = (uint32x2_t)(__p0);
  35823. return __ret;
  35824. }
  35825. #endif
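/* vreinterpret_u64_*: uint64x1_t is a single-lane vector, so this cast exposes
 * all 64 bits of a D register as one integer lane. Illustrative sketch of a
 * common idiom (not part of the generated header): pull the combined bytes of a
 * uint8x8_t out as a scalar.
 *
 *   uint8x8_t bytes = vdup_n_u8(0x80);
 *   uint64_t  bits  = vget_lane_u64(vreinterpret_u64_u8(bytes), 0);
 */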
  35826. #ifdef __LITTLE_ENDIAN__
  35827. __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
  35828. uint64x1_t __ret;
  35829. __ret = (uint64x1_t)(__p0);
  35830. return __ret;
  35831. }
  35832. #else
  35833. __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
  35834. uint64x1_t __ret;
  35835. __ret = (uint64x1_t)(__p0);
  35836. return __ret;
  35837. }
  35838. #endif
  35839. #ifdef __LITTLE_ENDIAN__
  35840. __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
  35841. uint64x1_t __ret;
  35842. __ret = (uint64x1_t)(__p0);
  35843. return __ret;
  35844. }
  35845. #else
  35846. __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
  35847. uint64x1_t __ret;
  35848. __ret = (uint64x1_t)(__p0);
  35849. return __ret;
  35850. }
  35851. #endif
  35852. #ifdef __LITTLE_ENDIAN__
  35853. __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
  35854. uint64x1_t __ret;
  35855. __ret = (uint64x1_t)(__p0);
  35856. return __ret;
  35857. }
  35858. #else
  35859. __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
  35860. uint64x1_t __ret;
  35861. __ret = (uint64x1_t)(__p0);
  35862. return __ret;
  35863. }
  35864. #endif
  35865. #ifdef __LITTLE_ENDIAN__
  35866. __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
  35867. uint64x1_t __ret;
  35868. __ret = (uint64x1_t)(__p0);
  35869. return __ret;
  35870. }
  35871. #else
  35872. __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
  35873. uint64x1_t __ret;
  35874. __ret = (uint64x1_t)(__p0);
  35875. return __ret;
  35876. }
  35877. #endif
  35878. #ifdef __LITTLE_ENDIAN__
  35879. __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
  35880. uint64x1_t __ret;
  35881. __ret = (uint64x1_t)(__p0);
  35882. return __ret;
  35883. }
  35884. #else
  35885. __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
  35886. uint64x1_t __ret;
  35887. __ret = (uint64x1_t)(__p0);
  35888. return __ret;
  35889. }
  35890. #endif
  35891. #ifdef __LITTLE_ENDIAN__
  35892. __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
  35893. uint64x1_t __ret;
  35894. __ret = (uint64x1_t)(__p0);
  35895. return __ret;
  35896. }
  35897. #else
  35898. __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
  35899. uint64x1_t __ret;
  35900. __ret = (uint64x1_t)(__p0);
  35901. return __ret;
  35902. }
  35903. #endif
  35904. #ifdef __LITTLE_ENDIAN__
  35905. __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
  35906. uint64x1_t __ret;
  35907. __ret = (uint64x1_t)(__p0);
  35908. return __ret;
  35909. }
  35910. #else
  35911. __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
  35912. uint64x1_t __ret;
  35913. __ret = (uint64x1_t)(__p0);
  35914. return __ret;
  35915. }
  35916. #endif
  35917. #ifdef __LITTLE_ENDIAN__
  35918. __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
  35919. uint64x1_t __ret;
  35920. __ret = (uint64x1_t)(__p0);
  35921. return __ret;
  35922. }
  35923. #else
  35924. __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
  35925. uint64x1_t __ret;
  35926. __ret = (uint64x1_t)(__p0);
  35927. return __ret;
  35928. }
  35929. #endif
  35930. #ifdef __LITTLE_ENDIAN__
  35931. __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
  35932. uint64x1_t __ret;
  35933. __ret = (uint64x1_t)(__p0);
  35934. return __ret;
  35935. }
  35936. #else
  35937. __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
  35938. uint64x1_t __ret;
  35939. __ret = (uint64x1_t)(__p0);
  35940. return __ret;
  35941. }
  35942. #endif
  35943. #ifdef __LITTLE_ENDIAN__
  35944. __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
  35945. uint64x1_t __ret;
  35946. __ret = (uint64x1_t)(__p0);
  35947. return __ret;
  35948. }
  35949. #else
  35950. __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
  35951. uint64x1_t __ret;
  35952. __ret = (uint64x1_t)(__p0);
  35953. return __ret;
  35954. }
  35955. #endif
  35956. #ifdef __LITTLE_ENDIAN__
  35957. __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
  35958. uint64x1_t __ret;
  35959. __ret = (uint64x1_t)(__p0);
  35960. return __ret;
  35961. }
  35962. #else
  35963. __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
  35964. uint64x1_t __ret;
  35965. __ret = (uint64x1_t)(__p0);
  35966. return __ret;
  35967. }
  35968. #endif
  35969. #ifdef __LITTLE_ENDIAN__
  35970. __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
  35971. uint64x1_t __ret;
  35972. __ret = (uint64x1_t)(__p0);
  35973. return __ret;
  35974. }
  35975. #else
  35976. __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
  35977. uint64x1_t __ret;
  35978. __ret = (uint64x1_t)(__p0);
  35979. return __ret;
  35980. }
  35981. #endif
  35982. #ifdef __LITTLE_ENDIAN__
  35983. __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
  35984. uint64x1_t __ret;
  35985. __ret = (uint64x1_t)(__p0);
  35986. return __ret;
  35987. }
  35988. #else
  35989. __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
  35990. uint64x1_t __ret;
  35991. __ret = (uint64x1_t)(__p0);
  35992. return __ret;
  35993. }
  35994. #endif
  35995. #ifdef __LITTLE_ENDIAN__
  35996. __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
  35997. uint16x4_t __ret;
  35998. __ret = (uint16x4_t)(__p0);
  35999. return __ret;
  36000. }
  36001. #else
  36002. __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
  36003. uint16x4_t __ret;
  36004. __ret = (uint16x4_t)(__p0);
  36005. return __ret;
  36006. }
  36007. #endif
  36008. #ifdef __LITTLE_ENDIAN__
  36009. __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
  36010. uint16x4_t __ret;
  36011. __ret = (uint16x4_t)(__p0);
  36012. return __ret;
  36013. }
  36014. #else
  36015. __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
  36016. uint16x4_t __ret;
  36017. __ret = (uint16x4_t)(__p0);
  36018. return __ret;
  36019. }
  36020. #endif
  36021. #ifdef __LITTLE_ENDIAN__
  36022. __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
  36023. uint16x4_t __ret;
  36024. __ret = (uint16x4_t)(__p0);
  36025. return __ret;
  36026. }
  36027. #else
  36028. __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
  36029. uint16x4_t __ret;
  36030. __ret = (uint16x4_t)(__p0);
  36031. return __ret;
  36032. }
  36033. #endif
  36034. #ifdef __LITTLE_ENDIAN__
  36035. __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
  36036. uint16x4_t __ret;
  36037. __ret = (uint16x4_t)(__p0);
  36038. return __ret;
  36039. }
  36040. #else
  36041. __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
  36042. uint16x4_t __ret;
  36043. __ret = (uint16x4_t)(__p0);
  36044. return __ret;
  36045. }
  36046. #endif
  36047. #ifdef __LITTLE_ENDIAN__
  36048. __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
  36049. uint16x4_t __ret;
  36050. __ret = (uint16x4_t)(__p0);
  36051. return __ret;
  36052. }
  36053. #else
  36054. __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
  36055. uint16x4_t __ret;
  36056. __ret = (uint16x4_t)(__p0);
  36057. return __ret;
  36058. }
  36059. #endif
  36060. #ifdef __LITTLE_ENDIAN__
  36061. __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
  36062. uint16x4_t __ret;
  36063. __ret = (uint16x4_t)(__p0);
  36064. return __ret;
  36065. }
  36066. #else
  36067. __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
  36068. uint16x4_t __ret;
  36069. __ret = (uint16x4_t)(__p0);
  36070. return __ret;
  36071. }
  36072. #endif
  36073. #ifdef __LITTLE_ENDIAN__
  36074. __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
  36075. uint16x4_t __ret;
  36076. __ret = (uint16x4_t)(__p0);
  36077. return __ret;
  36078. }
  36079. #else
  36080. __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
  36081. uint16x4_t __ret;
  36082. __ret = (uint16x4_t)(__p0);
  36083. return __ret;
  36084. }
  36085. #endif
  36086. #ifdef __LITTLE_ENDIAN__
  36087. __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
  36088. uint16x4_t __ret;
  36089. __ret = (uint16x4_t)(__p0);
  36090. return __ret;
  36091. }
  36092. #else
  36093. __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
  36094. uint16x4_t __ret;
  36095. __ret = (uint16x4_t)(__p0);
  36096. return __ret;
  36097. }
  36098. #endif
  36099. #ifdef __LITTLE_ENDIAN__
  36100. __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
  36101. uint16x4_t __ret;
  36102. __ret = (uint16x4_t)(__p0);
  36103. return __ret;
  36104. }
  36105. #else
  36106. __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
  36107. uint16x4_t __ret;
  36108. __ret = (uint16x4_t)(__p0);
  36109. return __ret;
  36110. }
  36111. #endif
  36112. #ifdef __LITTLE_ENDIAN__
  36113. __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
  36114. uint16x4_t __ret;
  36115. __ret = (uint16x4_t)(__p0);
  36116. return __ret;
  36117. }
  36118. #else
  36119. __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
  36120. uint16x4_t __ret;
  36121. __ret = (uint16x4_t)(__p0);
  36122. return __ret;
  36123. }
  36124. #endif
  36125. #ifdef __LITTLE_ENDIAN__
  36126. __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
  36127. uint16x4_t __ret;
  36128. __ret = (uint16x4_t)(__p0);
  36129. return __ret;
  36130. }
  36131. #else
  36132. __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
  36133. uint16x4_t __ret;
  36134. __ret = (uint16x4_t)(__p0);
  36135. return __ret;
  36136. }
  36137. #endif
  36138. #ifdef __LITTLE_ENDIAN__
  36139. __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
  36140. uint16x4_t __ret;
  36141. __ret = (uint16x4_t)(__p0);
  36142. return __ret;
  36143. }
  36144. #else
  36145. __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
  36146. uint16x4_t __ret;
  36147. __ret = (uint16x4_t)(__p0);
  36148. return __ret;
  36149. }
  36150. #endif
  36151. #ifdef __LITTLE_ENDIAN__
  36152. __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
  36153. uint16x4_t __ret;
  36154. __ret = (uint16x4_t)(__p0);
  36155. return __ret;
  36156. }
  36157. #else
  36158. __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
  36159. uint16x4_t __ret;
  36160. __ret = (uint16x4_t)(__p0);
  36161. return __ret;
  36162. }
  36163. #endif
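/* vreinterpret_s8_*: int8x8_t views. Reinterprets compose at no cost (no data
 * movement), so it is idiomatic to hop to the lane width an operation needs and
 * back. Illustrative sketch: per-byte population count on data held as
 * uint16x4_t.
 *
 *   uint16x4_t x   = vdup_n_u16(0x0f0f);
 *   int8x8_t   pop = vcnt_s8(vreinterpret_s8_u16(x));   // popcount of each byte
 */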
  36164. #ifdef __LITTLE_ENDIAN__
  36165. __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
  36166. int8x8_t __ret;
  36167. __ret = (int8x8_t)(__p0);
  36168. return __ret;
  36169. }
  36170. #else
  36171. __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
  36172. int8x8_t __ret;
  36173. __ret = (int8x8_t)(__p0);
  36174. return __ret;
  36175. }
  36176. #endif
  36177. #ifdef __LITTLE_ENDIAN__
  36178. __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
  36179. int8x8_t __ret;
  36180. __ret = (int8x8_t)(__p0);
  36181. return __ret;
  36182. }
  36183. #else
  36184. __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
  36185. int8x8_t __ret;
  36186. __ret = (int8x8_t)(__p0);
  36187. return __ret;
  36188. }
  36189. #endif
  36190. #ifdef __LITTLE_ENDIAN__
  36191. __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
  36192. int8x8_t __ret;
  36193. __ret = (int8x8_t)(__p0);
  36194. return __ret;
  36195. }
  36196. #else
  36197. __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
  36198. int8x8_t __ret;
  36199. __ret = (int8x8_t)(__p0);
  36200. return __ret;
  36201. }
  36202. #endif
  36203. #ifdef __LITTLE_ENDIAN__
  36204. __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
  36205. int8x8_t __ret;
  36206. __ret = (int8x8_t)(__p0);
  36207. return __ret;
  36208. }
  36209. #else
  36210. __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
  36211. int8x8_t __ret;
  36212. __ret = (int8x8_t)(__p0);
  36213. return __ret;
  36214. }
  36215. #endif
  36216. #ifdef __LITTLE_ENDIAN__
  36217. __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
  36218. int8x8_t __ret;
  36219. __ret = (int8x8_t)(__p0);
  36220. return __ret;
  36221. }
  36222. #else
  36223. __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
  36224. int8x8_t __ret;
  36225. __ret = (int8x8_t)(__p0);
  36226. return __ret;
  36227. }
  36228. #endif
  36229. #ifdef __LITTLE_ENDIAN__
  36230. __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
  36231. int8x8_t __ret;
  36232. __ret = (int8x8_t)(__p0);
  36233. return __ret;
  36234. }
  36235. #else
  36236. __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
  36237. int8x8_t __ret;
  36238. __ret = (int8x8_t)(__p0);
  36239. return __ret;
  36240. }
  36241. #endif
  36242. #ifdef __LITTLE_ENDIAN__
  36243. __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
  36244. int8x8_t __ret;
  36245. __ret = (int8x8_t)(__p0);
  36246. return __ret;
  36247. }
  36248. #else
  36249. __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
  36250. int8x8_t __ret;
  36251. __ret = (int8x8_t)(__p0);
  36252. return __ret;
  36253. }
  36254. #endif
  36255. #ifdef __LITTLE_ENDIAN__
  36256. __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
  36257. int8x8_t __ret;
  36258. __ret = (int8x8_t)(__p0);
  36259. return __ret;
  36260. }
  36261. #else
  36262. __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
  36263. int8x8_t __ret;
  36264. __ret = (int8x8_t)(__p0);
  36265. return __ret;
  36266. }
  36267. #endif
  36268. #ifdef __LITTLE_ENDIAN__
  36269. __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
  36270. int8x8_t __ret;
  36271. __ret = (int8x8_t)(__p0);
  36272. return __ret;
  36273. }
  36274. #else
  36275. __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
  36276. int8x8_t __ret;
  36277. __ret = (int8x8_t)(__p0);
  36278. return __ret;
  36279. }
  36280. #endif
  36281. #ifdef __LITTLE_ENDIAN__
  36282. __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
  36283. int8x8_t __ret;
  36284. __ret = (int8x8_t)(__p0);
  36285. return __ret;
  36286. }
  36287. #else
  36288. __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
  36289. int8x8_t __ret;
  36290. __ret = (int8x8_t)(__p0);
  36291. return __ret;
  36292. }
  36293. #endif
  36294. #ifdef __LITTLE_ENDIAN__
  36295. __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
  36296. int8x8_t __ret;
  36297. __ret = (int8x8_t)(__p0);
  36298. return __ret;
  36299. }
  36300. #else
  36301. __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
  36302. int8x8_t __ret;
  36303. __ret = (int8x8_t)(__p0);
  36304. return __ret;
  36305. }
  36306. #endif
  36307. #ifdef __LITTLE_ENDIAN__
  36308. __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
  36309. int8x8_t __ret;
  36310. __ret = (int8x8_t)(__p0);
  36311. return __ret;
  36312. }
  36313. #else
  36314. __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
  36315. int8x8_t __ret;
  36316. __ret = (int8x8_t)(__p0);
  36317. return __ret;
  36318. }
  36319. #endif
  36320. #ifdef __LITTLE_ENDIAN__
  36321. __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
  36322. int8x8_t __ret;
  36323. __ret = (int8x8_t)(__p0);
  36324. return __ret;
  36325. }
  36326. #else
  36327. __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
  36328. int8x8_t __ret;
  36329. __ret = (int8x8_t)(__p0);
  36330. return __ret;
  36331. }
  36332. #endif
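/* vreinterpret_f64_*: float64x1_t views (the float64 vector types exist only in
 * the AArch64 intrinsics). Illustrative sketch of one use: materialise a double
 * from its raw IEEE-754 bit pattern.
 *
 *   uint64x1_t  bits = vcreate_u64(0x3ff0000000000000ULL);   // 1.0
 *   float64x1_t one  = vreinterpret_f64_u64(bits);
 */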
  36333. #ifdef __LITTLE_ENDIAN__
  36334. __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
  36335. float64x1_t __ret;
  36336. __ret = (float64x1_t)(__p0);
  36337. return __ret;
  36338. }
  36339. #else
  36340. __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
  36341. float64x1_t __ret;
  36342. __ret = (float64x1_t)(__p0);
  36343. return __ret;
  36344. }
  36345. #endif
  36346. #ifdef __LITTLE_ENDIAN__
  36347. __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
  36348. float64x1_t __ret;
  36349. __ret = (float64x1_t)(__p0);
  36350. return __ret;
  36351. }
  36352. #else
  36353. __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
  36354. float64x1_t __ret;
  36355. __ret = (float64x1_t)(__p0);
  36356. return __ret;
  36357. }
  36358. #endif
  36359. #ifdef __LITTLE_ENDIAN__
  36360. __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
  36361. float64x1_t __ret;
  36362. __ret = (float64x1_t)(__p0);
  36363. return __ret;
  36364. }
  36365. #else
  36366. __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
  36367. float64x1_t __ret;
  36368. __ret = (float64x1_t)(__p0);
  36369. return __ret;
  36370. }
  36371. #endif
  36372. #ifdef __LITTLE_ENDIAN__
  36373. __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
  36374. float64x1_t __ret;
  36375. __ret = (float64x1_t)(__p0);
  36376. return __ret;
  36377. }
  36378. #else
  36379. __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
  36380. float64x1_t __ret;
  36381. __ret = (float64x1_t)(__p0);
  36382. return __ret;
  36383. }
  36384. #endif
  36385. #ifdef __LITTLE_ENDIAN__
  36386. __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
  36387. float64x1_t __ret;
  36388. __ret = (float64x1_t)(__p0);
  36389. return __ret;
  36390. }
  36391. #else
  36392. __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
  36393. float64x1_t __ret;
  36394. __ret = (float64x1_t)(__p0);
  36395. return __ret;
  36396. }
  36397. #endif
  36398. #ifdef __LITTLE_ENDIAN__
  36399. __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
  36400. float64x1_t __ret;
  36401. __ret = (float64x1_t)(__p0);
  36402. return __ret;
  36403. }
  36404. #else
  36405. __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
  36406. float64x1_t __ret;
  36407. __ret = (float64x1_t)(__p0);
  36408. return __ret;
  36409. }
  36410. #endif
  36411. #ifdef __LITTLE_ENDIAN__
  36412. __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
  36413. float64x1_t __ret;
  36414. __ret = (float64x1_t)(__p0);
  36415. return __ret;
  36416. }
  36417. #else
  36418. __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
  36419. float64x1_t __ret;
  36420. __ret = (float64x1_t)(__p0);
  36421. return __ret;
  36422. }
  36423. #endif
  36424. #ifdef __LITTLE_ENDIAN__
  36425. __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
  36426. float64x1_t __ret;
  36427. __ret = (float64x1_t)(__p0);
  36428. return __ret;
  36429. }
  36430. #else
  36431. __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
  36432. float64x1_t __ret;
  36433. __ret = (float64x1_t)(__p0);
  36434. return __ret;
  36435. }
  36436. #endif
  36437. #ifdef __LITTLE_ENDIAN__
  36438. __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
  36439. float64x1_t __ret;
  36440. __ret = (float64x1_t)(__p0);
  36441. return __ret;
  36442. }
  36443. #else
  36444. __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
  36445. float64x1_t __ret;
  36446. __ret = (float64x1_t)(__p0);
  36447. return __ret;
  36448. }
  36449. #endif
  36450. #ifdef __LITTLE_ENDIAN__
  36451. __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
  36452. float64x1_t __ret;
  36453. __ret = (float64x1_t)(__p0);
  36454. return __ret;
  36455. }
  36456. #else
  36457. __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
  36458. float64x1_t __ret;
  36459. __ret = (float64x1_t)(__p0);
  36460. return __ret;
  36461. }
  36462. #endif
  36463. #ifdef __LITTLE_ENDIAN__
  36464. __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
  36465. float64x1_t __ret;
  36466. __ret = (float64x1_t)(__p0);
  36467. return __ret;
  36468. }
  36469. #else
  36470. __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
  36471. float64x1_t __ret;
  36472. __ret = (float64x1_t)(__p0);
  36473. return __ret;
  36474. }
  36475. #endif
  36476. #ifdef __LITTLE_ENDIAN__
  36477. __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
  36478. float64x1_t __ret;
  36479. __ret = (float64x1_t)(__p0);
  36480. return __ret;
  36481. }
  36482. #else
  36483. __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
  36484. float64x1_t __ret;
  36485. __ret = (float64x1_t)(__p0);
  36486. return __ret;
  36487. }
  36488. #endif
  36489. #ifdef __LITTLE_ENDIAN__
  36490. __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
  36491. float64x1_t __ret;
  36492. __ret = (float64x1_t)(__p0);
  36493. return __ret;
  36494. }
  36495. #else
  36496. __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
  36497. float64x1_t __ret;
  36498. __ret = (float64x1_t)(__p0);
  36499. return __ret;
  36500. }
  36501. #endif
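/* vreinterpret_f32_*: float32x2_t views. Bit-level views of floats are the
 * usual route to sign-bit tricks. Illustrative sketch (not part of the
 * generated header): clear the sign bit of each lane, i.e. a hand-rolled
 * absolute value.
 *
 *   float32x2_t v    = vdup_n_f32(-2.5f);
 *   uint32x2_t  mask = vdup_n_u32(0x7fffffffu);
 *   float32x2_t absv = vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(v), mask));
 */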
  36502. #ifdef __LITTLE_ENDIAN__
  36503. __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
  36504. float32x2_t __ret;
  36505. __ret = (float32x2_t)(__p0);
  36506. return __ret;
  36507. }
  36508. #else
  36509. __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
  36510. float32x2_t __ret;
  36511. __ret = (float32x2_t)(__p0);
  36512. return __ret;
  36513. }
  36514. #endif
  36515. #ifdef __LITTLE_ENDIAN__
  36516. __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
  36517. float32x2_t __ret;
  36518. __ret = (float32x2_t)(__p0);
  36519. return __ret;
  36520. }
  36521. #else
  36522. __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
  36523. float32x2_t __ret;
  36524. __ret = (float32x2_t)(__p0);
  36525. return __ret;
  36526. }
  36527. #endif
  36528. #ifdef __LITTLE_ENDIAN__
  36529. __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
  36530. float32x2_t __ret;
  36531. __ret = (float32x2_t)(__p0);
  36532. return __ret;
  36533. }
  36534. #else
  36535. __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
  36536. float32x2_t __ret;
  36537. __ret = (float32x2_t)(__p0);
  36538. return __ret;
  36539. }
  36540. #endif
  36541. #ifdef __LITTLE_ENDIAN__
  36542. __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
  36543. float32x2_t __ret;
  36544. __ret = (float32x2_t)(__p0);
  36545. return __ret;
  36546. }
  36547. #else
  36548. __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
  36549. float32x2_t __ret;
  36550. __ret = (float32x2_t)(__p0);
  36551. return __ret;
  36552. }
  36553. #endif
  36554. #ifdef __LITTLE_ENDIAN__
  36555. __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
  36556. float32x2_t __ret;
  36557. __ret = (float32x2_t)(__p0);
  36558. return __ret;
  36559. }
  36560. #else
  36561. __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
  36562. float32x2_t __ret;
  36563. __ret = (float32x2_t)(__p0);
  36564. return __ret;
  36565. }
  36566. #endif
  36567. #ifdef __LITTLE_ENDIAN__
  36568. __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
  36569. float32x2_t __ret;
  36570. __ret = (float32x2_t)(__p0);
  36571. return __ret;
  36572. }
  36573. #else
  36574. __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
  36575. float32x2_t __ret;
  36576. __ret = (float32x2_t)(__p0);
  36577. return __ret;
  36578. }
  36579. #endif
  36580. #ifdef __LITTLE_ENDIAN__
  36581. __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
  36582. float32x2_t __ret;
  36583. __ret = (float32x2_t)(__p0);
  36584. return __ret;
  36585. }
  36586. #else
  36587. __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
  36588. float32x2_t __ret;
  36589. __ret = (float32x2_t)(__p0);
  36590. return __ret;
  36591. }
  36592. #endif
  36593. #ifdef __LITTLE_ENDIAN__
  36594. __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
  36595. float32x2_t __ret;
  36596. __ret = (float32x2_t)(__p0);
  36597. return __ret;
  36598. }
  36599. #else
  36600. __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
  36601. float32x2_t __ret;
  36602. __ret = (float32x2_t)(__p0);
  36603. return __ret;
  36604. }
  36605. #endif
  36606. #ifdef __LITTLE_ENDIAN__
  36607. __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
  36608. float32x2_t __ret;
  36609. __ret = (float32x2_t)(__p0);
  36610. return __ret;
  36611. }
  36612. #else
  36613. __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
  36614. float32x2_t __ret;
  36615. __ret = (float32x2_t)(__p0);
  36616. return __ret;
  36617. }
  36618. #endif
  36619. #ifdef __LITTLE_ENDIAN__
  36620. __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
  36621. float32x2_t __ret;
  36622. __ret = (float32x2_t)(__p0);
  36623. return __ret;
  36624. }
  36625. #else
  36626. __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
  36627. float32x2_t __ret;
  36628. __ret = (float32x2_t)(__p0);
  36629. return __ret;
  36630. }
  36631. #endif
  36632. #ifdef __LITTLE_ENDIAN__
  36633. __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
  36634. float32x2_t __ret;
  36635. __ret = (float32x2_t)(__p0);
  36636. return __ret;
  36637. }
  36638. #else
  36639. __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
  36640. float32x2_t __ret;
  36641. __ret = (float32x2_t)(__p0);
  36642. return __ret;
  36643. }
  36644. #endif
  36645. #ifdef __LITTLE_ENDIAN__
  36646. __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
  36647. float32x2_t __ret;
  36648. __ret = (float32x2_t)(__p0);
  36649. return __ret;
  36650. }
  36651. #else
  36652. __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
  36653. float32x2_t __ret;
  36654. __ret = (float32x2_t)(__p0);
  36655. return __ret;
  36656. }
  36657. #endif
  36658. #ifdef __LITTLE_ENDIAN__
  36659. __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
  36660. float32x2_t __ret;
  36661. __ret = (float32x2_t)(__p0);
  36662. return __ret;
  36663. }
  36664. #else
  36665. __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
  36666. float32x2_t __ret;
  36667. __ret = (float32x2_t)(__p0);
  36668. return __ret;
  36669. }
  36670. #endif
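/* vreinterpret_f16_*: float16x4_t views. In many configurations float16x4_t is
 * a storage-only type, so a typical pattern (assuming the half-precision
 * conversion intrinsics are available on the target) is to reinterpret raw
 * 16-bit data and then widen it for arithmetic:
 *
 *   uint16x4_t  raw = vdup_n_u16(0x3c00);              // 1.0 in IEEE-754 half
 *   float32x4_t f   = vcvt_f32_f16(vreinterpret_f16_u16(raw));
 */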
  36671. #ifdef __LITTLE_ENDIAN__
  36672. __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
  36673. float16x4_t __ret;
  36674. __ret = (float16x4_t)(__p0);
  36675. return __ret;
  36676. }
  36677. #else
  36678. __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
  36679. float16x4_t __ret;
  36680. __ret = (float16x4_t)(__p0);
  36681. return __ret;
  36682. }
  36683. #endif
  36684. #ifdef __LITTLE_ENDIAN__
  36685. __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
  36686. float16x4_t __ret;
  36687. __ret = (float16x4_t)(__p0);
  36688. return __ret;
  36689. }
  36690. #else
  36691. __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
  36692. float16x4_t __ret;
  36693. __ret = (float16x4_t)(__p0);
  36694. return __ret;
  36695. }
  36696. #endif
  36697. #ifdef __LITTLE_ENDIAN__
  36698. __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
  36699. float16x4_t __ret;
  36700. __ret = (float16x4_t)(__p0);
  36701. return __ret;
  36702. }
  36703. #else
  36704. __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
  36705. float16x4_t __ret;
  36706. __ret = (float16x4_t)(__p0);
  36707. return __ret;
  36708. }
  36709. #endif
  36710. #ifdef __LITTLE_ENDIAN__
  36711. __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
  36712. float16x4_t __ret;
  36713. __ret = (float16x4_t)(__p0);
  36714. return __ret;
  36715. }
  36716. #else
  36717. __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
  36718. float16x4_t __ret;
  36719. __ret = (float16x4_t)(__p0);
  36720. return __ret;
  36721. }
  36722. #endif
  36723. #ifdef __LITTLE_ENDIAN__
  36724. __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
  36725. float16x4_t __ret;
  36726. __ret = (float16x4_t)(__p0);
  36727. return __ret;
  36728. }
  36729. #else
  36730. __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
  36731. float16x4_t __ret;
  36732. __ret = (float16x4_t)(__p0);
  36733. return __ret;
  36734. }
  36735. #endif
  36736. #ifdef __LITTLE_ENDIAN__
  36737. __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
  36738. float16x4_t __ret;
  36739. __ret = (float16x4_t)(__p0);
  36740. return __ret;
  36741. }
  36742. #else
  36743. __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
  36744. float16x4_t __ret;
  36745. __ret = (float16x4_t)(__p0);
  36746. return __ret;
  36747. }
  36748. #endif
  36749. #ifdef __LITTLE_ENDIAN__
  36750. __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
  36751. float16x4_t __ret;
  36752. __ret = (float16x4_t)(__p0);
  36753. return __ret;
  36754. }
  36755. #else
  36756. __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
  36757. float16x4_t __ret;
  36758. __ret = (float16x4_t)(__p0);
  36759. return __ret;
  36760. }
  36761. #endif
  36762. #ifdef __LITTLE_ENDIAN__
  36763. __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
  36764. float16x4_t __ret;
  36765. __ret = (float16x4_t)(__p0);
  36766. return __ret;
  36767. }
  36768. #else
  36769. __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
  36770. float16x4_t __ret;
  36771. __ret = (float16x4_t)(__p0);
  36772. return __ret;
  36773. }
  36774. #endif
  36775. #ifdef __LITTLE_ENDIAN__
  36776. __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
  36777. float16x4_t __ret;
  36778. __ret = (float16x4_t)(__p0);
  36779. return __ret;
  36780. }
  36781. #else
  36782. __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
  36783. float16x4_t __ret;
  36784. __ret = (float16x4_t)(__p0);
  36785. return __ret;
  36786. }
  36787. #endif
  36788. #ifdef __LITTLE_ENDIAN__
  36789. __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
  36790. float16x4_t __ret;
  36791. __ret = (float16x4_t)(__p0);
  36792. return __ret;
  36793. }
  36794. #else
  36795. __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
  36796. float16x4_t __ret;
  36797. __ret = (float16x4_t)(__p0);
  36798. return __ret;
  36799. }
  36800. #endif
  36801. #ifdef __LITTLE_ENDIAN__
  36802. __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
  36803. float16x4_t __ret;
  36804. __ret = (float16x4_t)(__p0);
  36805. return __ret;
  36806. }
  36807. #else
  36808. __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
  36809. float16x4_t __ret;
  36810. __ret = (float16x4_t)(__p0);
  36811. return __ret;
  36812. }
  36813. #endif
  36814. #ifdef __LITTLE_ENDIAN__
  36815. __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
  36816. float16x4_t __ret;
  36817. __ret = (float16x4_t)(__p0);
  36818. return __ret;
  36819. }
  36820. #else
  36821. __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
  36822. float16x4_t __ret;
  36823. __ret = (float16x4_t)(__p0);
  36824. return __ret;
  36825. }
  36826. #endif
  36827. #ifdef __LITTLE_ENDIAN__
  36828. __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
  36829. float16x4_t __ret;
  36830. __ret = (float16x4_t)(__p0);
  36831. return __ret;
  36832. }
  36833. #else
  36834. __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
  36835. float16x4_t __ret;
  36836. __ret = (float16x4_t)(__p0);
  36837. return __ret;
  36838. }
  36839. #endif
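/* vreinterpret_s32_*: int32x2_t views. Worth keeping distinct from vcvt: a
 * reinterpret reuses the bits unchanged, whereas vcvt converts values.
 * Illustrative sketch:
 *
 *   float32x2_t f    = vdup_n_f32(2.0f);
 *   int32x2_t   bits = vreinterpret_s32_f32(f);   // 0x40000000 in each lane
 *   int32x2_t   vals = vcvt_s32_f32(f);           // 2 in each lane
 */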
  36840. #ifdef __LITTLE_ENDIAN__
  36841. __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
  36842. int32x2_t __ret;
  36843. __ret = (int32x2_t)(__p0);
  36844. return __ret;
  36845. }
  36846. #else
  36847. __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
  36848. int32x2_t __ret;
  36849. __ret = (int32x2_t)(__p0);
  36850. return __ret;
  36851. }
  36852. #endif
  36853. #ifdef __LITTLE_ENDIAN__
  36854. __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
  36855. int32x2_t __ret;
  36856. __ret = (int32x2_t)(__p0);
  36857. return __ret;
  36858. }
  36859. #else
  36860. __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
  36861. int32x2_t __ret;
  36862. __ret = (int32x2_t)(__p0);
  36863. return __ret;
  36864. }
  36865. #endif
  36866. #ifdef __LITTLE_ENDIAN__
  36867. __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
  36868. int32x2_t __ret;
  36869. __ret = (int32x2_t)(__p0);
  36870. return __ret;
  36871. }
  36872. #else
  36873. __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
  36874. int32x2_t __ret;
  36875. __ret = (int32x2_t)(__p0);
  36876. return __ret;
  36877. }
  36878. #endif
  36879. #ifdef __LITTLE_ENDIAN__
  36880. __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
  36881. int32x2_t __ret;
  36882. __ret = (int32x2_t)(__p0);
  36883. return __ret;
  36884. }
  36885. #else
  36886. __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
  36887. int32x2_t __ret;
  36888. __ret = (int32x2_t)(__p0);
  36889. return __ret;
  36890. }
  36891. #endif
  36892. #ifdef __LITTLE_ENDIAN__
  36893. __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
  36894. int32x2_t __ret;
  36895. __ret = (int32x2_t)(__p0);
  36896. return __ret;
  36897. }
  36898. #else
  36899. __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
  36900. int32x2_t __ret;
  36901. __ret = (int32x2_t)(__p0);
  36902. return __ret;
  36903. }
  36904. #endif
  36905. #ifdef __LITTLE_ENDIAN__
  36906. __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
  36907. int32x2_t __ret;
  36908. __ret = (int32x2_t)(__p0);
  36909. return __ret;
  36910. }
  36911. #else
  36912. __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
  36913. int32x2_t __ret;
  36914. __ret = (int32x2_t)(__p0);
  36915. return __ret;
  36916. }
  36917. #endif
  36918. #ifdef __LITTLE_ENDIAN__
  36919. __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
  36920. int32x2_t __ret;
  36921. __ret = (int32x2_t)(__p0);
  36922. return __ret;
  36923. }
  36924. #else
  36925. __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
  36926. int32x2_t __ret;
  36927. __ret = (int32x2_t)(__p0);
  36928. return __ret;
  36929. }
  36930. #endif
  36931. #ifdef __LITTLE_ENDIAN__
  36932. __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
  36933. int32x2_t __ret;
  36934. __ret = (int32x2_t)(__p0);
  36935. return __ret;
  36936. }
  36937. #else
  36938. __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
  36939. int32x2_t __ret;
  36940. __ret = (int32x2_t)(__p0);
  36941. return __ret;
  36942. }
  36943. #endif
  36944. #ifdef __LITTLE_ENDIAN__
  36945. __ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
  36946. int32x2_t __ret;
  36947. __ret = (int32x2_t)(__p0);
  36948. return __ret;
  36949. }
  36950. #else
  36951. __ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
  36952. int32x2_t __ret;
  36953. __ret = (int32x2_t)(__p0);
  36954. return __ret;
  36955. }
  36956. #endif
  36957. #ifdef __LITTLE_ENDIAN__
  36958. __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
  36959. int32x2_t __ret;
  36960. __ret = (int32x2_t)(__p0);
  36961. return __ret;
  36962. }
  36963. #else
  36964. __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
  36965. int32x2_t __ret;
  36966. __ret = (int32x2_t)(__p0);
  36967. return __ret;
  36968. }
  36969. #endif
  36970. #ifdef __LITTLE_ENDIAN__
  36971. __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
  36972. int32x2_t __ret;
  36973. __ret = (int32x2_t)(__p0);
  36974. return __ret;
  36975. }
  36976. #else
  36977. __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
  36978. int32x2_t __ret;
  36979. __ret = (int32x2_t)(__p0);
  36980. return __ret;
  36981. }
  36982. #endif
  36983. #ifdef __LITTLE_ENDIAN__
  36984. __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
  36985. int32x2_t __ret;
  36986. __ret = (int32x2_t)(__p0);
  36987. return __ret;
  36988. }
  36989. #else
  36990. __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
  36991. int32x2_t __ret;
  36992. __ret = (int32x2_t)(__p0);
  36993. return __ret;
  36994. }
  36995. #endif
  36996. #ifdef __LITTLE_ENDIAN__
  36997. __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
  36998. int32x2_t __ret;
  36999. __ret = (int32x2_t)(__p0);
  37000. return __ret;
  37001. }
  37002. #else
  37003. __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
  37004. int32x2_t __ret;
  37005. __ret = (int32x2_t)(__p0);
  37006. return __ret;
  37007. }
  37008. #endif
  37009. #ifdef __LITTLE_ENDIAN__
  37010. __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
  37011. int64x1_t __ret;
  37012. __ret = (int64x1_t)(__p0);
  37013. return __ret;
  37014. }
  37015. #else
  37016. __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
  37017. int64x1_t __ret;
  37018. __ret = (int64x1_t)(__p0);
  37019. return __ret;
  37020. }
  37021. #endif
  37022. #ifdef __LITTLE_ENDIAN__
  37023. __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
  37024. int64x1_t __ret;
  37025. __ret = (int64x1_t)(__p0);
  37026. return __ret;
  37027. }
  37028. #else
  37029. __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
  37030. int64x1_t __ret;
  37031. __ret = (int64x1_t)(__p0);
  37032. return __ret;
  37033. }
  37034. #endif
  37035. #ifdef __LITTLE_ENDIAN__
  37036. __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
  37037. int64x1_t __ret;
  37038. __ret = (int64x1_t)(__p0);
  37039. return __ret;
  37040. }
  37041. #else
  37042. __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
  37043. int64x1_t __ret;
  37044. __ret = (int64x1_t)(__p0);
  37045. return __ret;
  37046. }
  37047. #endif
  37048. #ifdef __LITTLE_ENDIAN__
  37049. __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
  37050. int64x1_t __ret;
  37051. __ret = (int64x1_t)(__p0);
  37052. return __ret;
  37053. }
  37054. #else
  37055. __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
  37056. int64x1_t __ret;
  37057. __ret = (int64x1_t)(__p0);
  37058. return __ret;
  37059. }
  37060. #endif
  37061. #ifdef __LITTLE_ENDIAN__
  37062. __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
  37063. int64x1_t __ret;
  37064. __ret = (int64x1_t)(__p0);
  37065. return __ret;
  37066. }
  37067. #else
  37068. __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
  37069. int64x1_t __ret;
  37070. __ret = (int64x1_t)(__p0);
  37071. return __ret;
  37072. }
  37073. #endif
  37074. #ifdef __LITTLE_ENDIAN__
  37075. __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
  37076. int64x1_t __ret;
  37077. __ret = (int64x1_t)(__p0);
  37078. return __ret;
  37079. }
  37080. #else
  37081. __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
  37082. int64x1_t __ret;
  37083. __ret = (int64x1_t)(__p0);
  37084. return __ret;
  37085. }
  37086. #endif
  37087. #ifdef __LITTLE_ENDIAN__
  37088. __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
  37089. int64x1_t __ret;
  37090. __ret = (int64x1_t)(__p0);
  37091. return __ret;
  37092. }
  37093. #else
  37094. __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
  37095. int64x1_t __ret;
  37096. __ret = (int64x1_t)(__p0);
  37097. return __ret;
  37098. }
  37099. #endif
  37100. #ifdef __LITTLE_ENDIAN__
  37101. __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
  37102. int64x1_t __ret;
  37103. __ret = (int64x1_t)(__p0);
  37104. return __ret;
  37105. }
  37106. #else
  37107. __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
  37108. int64x1_t __ret;
  37109. __ret = (int64x1_t)(__p0);
  37110. return __ret;
  37111. }
  37112. #endif
  37113. #ifdef __LITTLE_ENDIAN__
  37114. __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
  37115. int64x1_t __ret;
  37116. __ret = (int64x1_t)(__p0);
  37117. return __ret;
  37118. }
  37119. #else
  37120. __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
  37121. int64x1_t __ret;
  37122. __ret = (int64x1_t)(__p0);
  37123. return __ret;
  37124. }
  37125. #endif
  37126. #ifdef __LITTLE_ENDIAN__
  37127. __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
  37128. int64x1_t __ret;
  37129. __ret = (int64x1_t)(__p0);
  37130. return __ret;
  37131. }
  37132. #else
  37133. __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
  37134. int64x1_t __ret;
  37135. __ret = (int64x1_t)(__p0);
  37136. return __ret;
  37137. }
  37138. #endif
  37139. #ifdef __LITTLE_ENDIAN__
  37140. __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
  37141. int64x1_t __ret;
  37142. __ret = (int64x1_t)(__p0);
  37143. return __ret;
  37144. }
  37145. #else
  37146. __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
  37147. int64x1_t __ret;
  37148. __ret = (int64x1_t)(__p0);
  37149. return __ret;
  37150. }
  37151. #endif
  37152. #ifdef __LITTLE_ENDIAN__
  37153. __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
  37154. int64x1_t __ret;
  37155. __ret = (int64x1_t)(__p0);
  37156. return __ret;
  37157. }
  37158. #else
  37159. __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
  37160. int64x1_t __ret;
  37161. __ret = (int64x1_t)(__p0);
  37162. return __ret;
  37163. }
  37164. #endif
  37165. #ifdef __LITTLE_ENDIAN__
  37166. __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
  37167. int64x1_t __ret;
  37168. __ret = (int64x1_t)(__p0);
  37169. return __ret;
  37170. }
  37171. #else
  37172. __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
  37173. int64x1_t __ret;
  37174. __ret = (int64x1_t)(__p0);
  37175. return __ret;
  37176. }
  37177. #endif
  37178. #ifdef __LITTLE_ENDIAN__
  37179. __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
  37180. int16x4_t __ret;
  37181. __ret = (int16x4_t)(__p0);
  37182. return __ret;
  37183. }
  37184. #else
  37185. __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
  37186. int16x4_t __ret;
  37187. __ret = (int16x4_t)(__p0);
  37188. return __ret;
  37189. }
  37190. #endif
  37191. #ifdef __LITTLE_ENDIAN__
  37192. __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
  37193. int16x4_t __ret;
  37194. __ret = (int16x4_t)(__p0);
  37195. return __ret;
  37196. }
  37197. #else
  37198. __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
  37199. int16x4_t __ret;
  37200. __ret = (int16x4_t)(__p0);
  37201. return __ret;
  37202. }
  37203. #endif
  37204. #ifdef __LITTLE_ENDIAN__
  37205. __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
  37206. int16x4_t __ret;
  37207. __ret = (int16x4_t)(__p0);
  37208. return __ret;
  37209. }
  37210. #else
  37211. __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
  37212. int16x4_t __ret;
  37213. __ret = (int16x4_t)(__p0);
  37214. return __ret;
  37215. }
  37216. #endif
  37217. #ifdef __LITTLE_ENDIAN__
  37218. __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
  37219. int16x4_t __ret;
  37220. __ret = (int16x4_t)(__p0);
  37221. return __ret;
  37222. }
  37223. #else
  37224. __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
  37225. int16x4_t __ret;
  37226. __ret = (int16x4_t)(__p0);
  37227. return __ret;
  37228. }
  37229. #endif
  37230. #ifdef __LITTLE_ENDIAN__
  37231. __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
  37232. int16x4_t __ret;
  37233. __ret = (int16x4_t)(__p0);
  37234. return __ret;
  37235. }
  37236. #else
  37237. __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
  37238. int16x4_t __ret;
  37239. __ret = (int16x4_t)(__p0);
  37240. return __ret;
  37241. }
  37242. #endif
  37243. #ifdef __LITTLE_ENDIAN__
  37244. __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
  37245. int16x4_t __ret;
  37246. __ret = (int16x4_t)(__p0);
  37247. return __ret;
  37248. }
  37249. #else
  37250. __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
  37251. int16x4_t __ret;
  37252. __ret = (int16x4_t)(__p0);
  37253. return __ret;
  37254. }
  37255. #endif
  37256. #ifdef __LITTLE_ENDIAN__
  37257. __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
  37258. int16x4_t __ret;
  37259. __ret = (int16x4_t)(__p0);
  37260. return __ret;
  37261. }
  37262. #else
  37263. __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
  37264. int16x4_t __ret;
  37265. __ret = (int16x4_t)(__p0);
  37266. return __ret;
  37267. }
  37268. #endif
  37269. #ifdef __LITTLE_ENDIAN__
  37270. __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
  37271. int16x4_t __ret;
  37272. __ret = (int16x4_t)(__p0);
  37273. return __ret;
  37274. }
  37275. #else
  37276. __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
  37277. int16x4_t __ret;
  37278. __ret = (int16x4_t)(__p0);
  37279. return __ret;
  37280. }
  37281. #endif
  37282. #ifdef __LITTLE_ENDIAN__
  37283. __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
  37284. int16x4_t __ret;
  37285. __ret = (int16x4_t)(__p0);
  37286. return __ret;
  37287. }
  37288. #else
  37289. __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
  37290. int16x4_t __ret;
  37291. __ret = (int16x4_t)(__p0);
  37292. return __ret;
  37293. }
  37294. #endif
  37295. #ifdef __LITTLE_ENDIAN__
  37296. __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
  37297. int16x4_t __ret;
  37298. __ret = (int16x4_t)(__p0);
  37299. return __ret;
  37300. }
  37301. #else
  37302. __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
  37303. int16x4_t __ret;
  37304. __ret = (int16x4_t)(__p0);
  37305. return __ret;
  37306. }
  37307. #endif
  37308. #ifdef __LITTLE_ENDIAN__
  37309. __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
  37310. int16x4_t __ret;
  37311. __ret = (int16x4_t)(__p0);
  37312. return __ret;
  37313. }
  37314. #else
  37315. __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
  37316. int16x4_t __ret;
  37317. __ret = (int16x4_t)(__p0);
  37318. return __ret;
  37319. }
  37320. #endif
  37321. #ifdef __LITTLE_ENDIAN__
  37322. __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
  37323. int16x4_t __ret;
  37324. __ret = (int16x4_t)(__p0);
  37325. return __ret;
  37326. }
  37327. #else
  37328. __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
  37329. int16x4_t __ret;
  37330. __ret = (int16x4_t)(__p0);
  37331. return __ret;
  37332. }
  37333. #endif
  37334. #ifdef __LITTLE_ENDIAN__
  37335. __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
  37336. int16x4_t __ret;
  37337. __ret = (int16x4_t)(__p0);
  37338. return __ret;
  37339. }
  37340. #else
  37341. __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
  37342. int16x4_t __ret;
  37343. __ret = (int16x4_t)(__p0);
  37344. return __ret;
  37345. }
  37346. #endif
  37347. #endif
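/*
 * Note: the vreinterpret_* functions above (and the wider family they belong
 * to) perform pure bit-pattern reinterpretation between 64-bit vector types;
 * no conversion instruction is emitted and lane values are not changed.
 * Illustrative sketch of user code (not part of the generated header; assumes
 * <arm_neon.h> is included):
 *
 *   float32x2_t f    = vdup_n_f32(1.0f);
 *   int64x1_t   bits = vreinterpret_s64_f32(f);  // same 64 bits, one s64 lane
 */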
  37348. #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
  37349. #ifdef __LITTLE_ENDIAN__
  37350. __ai float64x2_t vrndq_f64(float64x2_t __p0) {
  37351. float64x2_t __ret;
  37352. __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
  37353. return __ret;
  37354. }
  37355. #else
  37356. __ai float64x2_t vrndq_f64(float64x2_t __p0) {
  37357. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37358. float64x2_t __ret;
  37359. __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
  37360. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37361. return __ret;
  37362. }
  37363. #endif
  37364. #ifdef __LITTLE_ENDIAN__
  37365. __ai float64x1_t vrnd_f64(float64x1_t __p0) {
  37366. float64x1_t __ret;
  37367. __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
  37368. return __ret;
  37369. }
  37370. #else
  37371. __ai float64x1_t vrnd_f64(float64x1_t __p0) {
  37372. float64x1_t __ret;
  37373. __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
  37374. return __ret;
  37375. }
  37376. #endif
  37377. #ifdef __LITTLE_ENDIAN__
  37378. __ai float64x2_t vrndaq_f64(float64x2_t __p0) {
  37379. float64x2_t __ret;
  37380. __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
  37381. return __ret;
  37382. }
  37383. #else
  37384. __ai float64x2_t vrndaq_f64(float64x2_t __p0) {
  37385. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37386. float64x2_t __ret;
  37387. __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
  37388. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37389. return __ret;
  37390. }
  37391. #endif
  37392. #ifdef __LITTLE_ENDIAN__
  37393. __ai float64x1_t vrnda_f64(float64x1_t __p0) {
  37394. float64x1_t __ret;
  37395. __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
  37396. return __ret;
  37397. }
  37398. #else
  37399. __ai float64x1_t vrnda_f64(float64x1_t __p0) {
  37400. float64x1_t __ret;
  37401. __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
  37402. return __ret;
  37403. }
  37404. #endif
  37405. #ifdef __LITTLE_ENDIAN__
  37406. __ai float64x2_t vrndiq_f64(float64x2_t __p0) {
  37407. float64x2_t __ret;
  37408. __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
  37409. return __ret;
  37410. }
  37411. #else
  37412. __ai float64x2_t vrndiq_f64(float64x2_t __p0) {
  37413. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37414. float64x2_t __ret;
  37415. __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
  37416. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37417. return __ret;
  37418. }
  37419. #endif
  37420. #ifdef __LITTLE_ENDIAN__
  37421. __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
  37422. float32x4_t __ret;
  37423. __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
  37424. return __ret;
  37425. }
  37426. #else
  37427. __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
  37428. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37429. float32x4_t __ret;
  37430. __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
  37431. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37432. return __ret;
  37433. }
  37434. #endif
  37435. #ifdef __LITTLE_ENDIAN__
  37436. __ai float64x1_t vrndi_f64(float64x1_t __p0) {
  37437. float64x1_t __ret;
  37438. __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
  37439. return __ret;
  37440. }
  37441. #else
  37442. __ai float64x1_t vrndi_f64(float64x1_t __p0) {
  37443. float64x1_t __ret;
  37444. __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
  37445. return __ret;
  37446. }
  37447. #endif
  37448. #ifdef __LITTLE_ENDIAN__
  37449. __ai float32x2_t vrndi_f32(float32x2_t __p0) {
  37450. float32x2_t __ret;
  37451. __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
  37452. return __ret;
  37453. }
  37454. #else
  37455. __ai float32x2_t vrndi_f32(float32x2_t __p0) {
  37456. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37457. float32x2_t __ret;
  37458. __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
  37459. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37460. return __ret;
  37461. }
  37462. #endif
  37463. #ifdef __LITTLE_ENDIAN__
  37464. __ai float64x2_t vrndmq_f64(float64x2_t __p0) {
  37465. float64x2_t __ret;
  37466. __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
  37467. return __ret;
  37468. }
  37469. #else
  37470. __ai float64x2_t vrndmq_f64(float64x2_t __p0) {
  37471. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37472. float64x2_t __ret;
  37473. __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
  37474. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37475. return __ret;
  37476. }
  37477. #endif
  37478. #ifdef __LITTLE_ENDIAN__
  37479. __ai float64x1_t vrndm_f64(float64x1_t __p0) {
  37480. float64x1_t __ret;
  37481. __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
  37482. return __ret;
  37483. }
  37484. #else
  37485. __ai float64x1_t vrndm_f64(float64x1_t __p0) {
  37486. float64x1_t __ret;
  37487. __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
  37488. return __ret;
  37489. }
  37490. #endif
  37491. #ifdef __LITTLE_ENDIAN__
  37492. __ai float64x2_t vrndnq_f64(float64x2_t __p0) {
  37493. float64x2_t __ret;
  37494. __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
  37495. return __ret;
  37496. }
  37497. #else
  37498. __ai float64x2_t vrndnq_f64(float64x2_t __p0) {
  37499. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37500. float64x2_t __ret;
  37501. __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
  37502. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37503. return __ret;
  37504. }
  37505. #endif
  37506. #ifdef __LITTLE_ENDIAN__
  37507. __ai float64x1_t vrndn_f64(float64x1_t __p0) {
  37508. float64x1_t __ret;
  37509. __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
  37510. return __ret;
  37511. }
  37512. #else
  37513. __ai float64x1_t vrndn_f64(float64x1_t __p0) {
  37514. float64x1_t __ret;
  37515. __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
  37516. return __ret;
  37517. }
  37518. #endif
  37519. #ifdef __LITTLE_ENDIAN__
  37520. __ai float64x2_t vrndpq_f64(float64x2_t __p0) {
  37521. float64x2_t __ret;
  37522. __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
  37523. return __ret;
  37524. }
  37525. #else
  37526. __ai float64x2_t vrndpq_f64(float64x2_t __p0) {
  37527. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37528. float64x2_t __ret;
  37529. __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
  37530. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37531. return __ret;
  37532. }
  37533. #endif
  37534. #ifdef __LITTLE_ENDIAN__
  37535. __ai float64x1_t vrndp_f64(float64x1_t __p0) {
  37536. float64x1_t __ret;
  37537. __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
  37538. return __ret;
  37539. }
  37540. #else
  37541. __ai float64x1_t vrndp_f64(float64x1_t __p0) {
  37542. float64x1_t __ret;
  37543. __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
  37544. return __ret;
  37545. }
  37546. #endif
  37547. #ifdef __LITTLE_ENDIAN__
  37548. __ai float64x2_t vrndxq_f64(float64x2_t __p0) {
  37549. float64x2_t __ret;
  37550. __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
  37551. return __ret;
  37552. }
  37553. #else
  37554. __ai float64x2_t vrndxq_f64(float64x2_t __p0) {
  37555. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37556. float64x2_t __ret;
  37557. __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
  37558. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37559. return __ret;
  37560. }
  37561. #endif
  37562. #ifdef __LITTLE_ENDIAN__
  37563. __ai float64x1_t vrndx_f64(float64x1_t __p0) {
  37564. float64x1_t __ret;
  37565. __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
  37566. return __ret;
  37567. }
  37568. #else
  37569. __ai float64x1_t vrndx_f64(float64x1_t __p0) {
  37570. float64x1_t __ret;
  37571. __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
  37572. return __ret;
  37573. }
  37574. #endif
  37575. #endif
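/*
 * The __ARM_FEATURE_DIRECTED_ROUNDING block above provides the vector
 * round-to-integral operations on AArch64:
 *   vrnd[q]   - toward zero (truncate)
 *   vrnda[q]  - to nearest, ties away from zero
 *   vrndi[q]  - using the current FPCR rounding mode
 *   vrndm[q]  - toward minus infinity (floor)
 *   vrndn[q]  - to nearest, ties to even
 *   vrndp[q]  - toward plus infinity (ceiling)
 *   vrndx[q]  - using the current rounding mode, raising Inexact if needed
 * Illustrative sketch of user code (not part of the generated header):
 *
 *   float64x2_t v = vdupq_n_f64(2.5);
 *   float64x2_t r = vrndnq_f64(v);   // {2.0, 2.0}: ties round to even
 */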
  37576. #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
  37577. #ifdef __LITTLE_ENDIAN__
  37578. __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
  37579. float64x2_t __ret;
  37580. __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  37581. return __ret;
  37582. }
  37583. #else
  37584. __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
  37585. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37586. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  37587. float64x2_t __ret;
  37588. __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  37589. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37590. return __ret;
  37591. }
  37592. #endif
  37593. #ifdef __LITTLE_ENDIAN__
  37594. __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
  37595. float64x1_t __ret;
  37596. __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  37597. return __ret;
  37598. }
  37599. #else
  37600. __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
  37601. float64x1_t __ret;
  37602. __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  37603. return __ret;
  37604. }
  37605. #endif
  37606. #ifdef __LITTLE_ENDIAN__
  37607. __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
  37608. float64x2_t __ret;
  37609. __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  37610. return __ret;
  37611. }
  37612. #else
  37613. __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
  37614. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37615. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  37616. float64x2_t __ret;
  37617. __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  37618. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37619. return __ret;
  37620. }
  37621. #endif
  37622. #ifdef __LITTLE_ENDIAN__
  37623. __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
  37624. float64x1_t __ret;
  37625. __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  37626. return __ret;
  37627. }
  37628. #else
  37629. __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
  37630. float64x1_t __ret;
  37631. __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  37632. return __ret;
  37633. }
  37634. #endif
  37635. #endif
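/*
 * vmaxnm/vminnm above implement the IEEE 754-2008 maxNum/minNum operations:
 * when exactly one operand of a lane is a quiet NaN, the numeric operand is
 * returned instead of the NaN.
 * Illustrative sketch of user code (not part of the generated header):
 *
 *   float64x1_t a = vdup_n_f64(__builtin_nan(""));
 *   float64x1_t b = vdup_n_f64(3.0);
 *   float64x1_t m = vmaxnm_f64(a, b);   // 3.0, the NaN is suppressed
 */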
  37636. #if __ARM_FEATURE_CRYPTO
  37637. #ifdef __LITTLE_ENDIAN__
  37638. __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  37639. uint8x16_t __ret;
  37640. __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  37641. return __ret;
  37642. }
  37643. #else
  37644. __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  37645. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37646. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37647. uint8x16_t __ret;
  37648. __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  37649. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37650. return __ret;
  37651. }
  37652. #endif
  37653. #ifdef __LITTLE_ENDIAN__
  37654. __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  37655. uint8x16_t __ret;
  37656. __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  37657. return __ret;
  37658. }
  37659. #else
  37660. __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  37661. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37662. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37663. uint8x16_t __ret;
  37664. __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  37665. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37666. return __ret;
  37667. }
  37668. #endif
  37669. #ifdef __LITTLE_ENDIAN__
  37670. __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
  37671. uint8x16_t __ret;
  37672. __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
  37673. return __ret;
  37674. }
  37675. #else
  37676. __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
  37677. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37678. uint8x16_t __ret;
  37679. __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
  37680. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37681. return __ret;
  37682. }
  37683. #endif
  37684. #ifdef __LITTLE_ENDIAN__
  37685. __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
  37686. uint8x16_t __ret;
  37687. __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
  37688. return __ret;
  37689. }
  37690. #else
  37691. __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
  37692. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37693. uint8x16_t __ret;
  37694. __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
  37695. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  37696. return __ret;
  37697. }
  37698. #endif
  37699. #ifdef __LITTLE_ENDIAN__
  37700. __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  37701. uint32x4_t __ret;
  37702. __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
  37703. return __ret;
  37704. }
  37705. #else
  37706. __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  37707. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37708. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37709. uint32x4_t __ret;
  37710. __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
  37711. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37712. return __ret;
  37713. }
  37714. #endif
  37715. #ifdef __LITTLE_ENDIAN__
  37716. __ai uint32_t vsha1h_u32(uint32_t __p0) {
  37717. uint32_t __ret;
  37718. __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
  37719. return __ret;
  37720. }
  37721. #else
  37722. __ai uint32_t vsha1h_u32(uint32_t __p0) {
  37723. uint32_t __ret;
  37724. __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
  37725. return __ret;
  37726. }
  37727. #endif
  37728. #ifdef __LITTLE_ENDIAN__
  37729. __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  37730. uint32x4_t __ret;
  37731. __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
  37732. return __ret;
  37733. }
  37734. #else
  37735. __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  37736. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37737. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37738. uint32x4_t __ret;
  37739. __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
  37740. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37741. return __ret;
  37742. }
  37743. #endif
  37744. #ifdef __LITTLE_ENDIAN__
  37745. __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  37746. uint32x4_t __ret;
  37747. __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
  37748. return __ret;
  37749. }
  37750. #else
  37751. __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
  37752. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37753. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37754. uint32x4_t __ret;
  37755. __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
  37756. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37757. return __ret;
  37758. }
  37759. #endif
  37760. #ifdef __LITTLE_ENDIAN__
  37761. __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  37762. uint32x4_t __ret;
  37763. __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
  37764. return __ret;
  37765. }
  37766. #else
  37767. __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  37768. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37769. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37770. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37771. uint32x4_t __ret;
  37772. __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
  37773. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37774. return __ret;
  37775. }
  37776. #endif
  37777. #ifdef __LITTLE_ENDIAN__
  37778. __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  37779. uint32x4_t __ret;
  37780. __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  37781. return __ret;
  37782. }
  37783. #else
  37784. __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  37785. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37786. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37787. uint32x4_t __ret;
  37788. __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  37789. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37790. return __ret;
  37791. }
  37792. #endif
  37793. #ifdef __LITTLE_ENDIAN__
  37794. __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  37795. uint32x4_t __ret;
  37796. __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
  37797. return __ret;
  37798. }
  37799. #else
  37800. __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  37801. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37802. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37803. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37804. uint32x4_t __ret;
  37805. __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
  37806. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37807. return __ret;
  37808. }
  37809. #endif
  37810. #ifdef __LITTLE_ENDIAN__
  37811. __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  37812. uint32x4_t __ret;
  37813. __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
  37814. return __ret;
  37815. }
  37816. #else
  37817. __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  37818. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37819. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37820. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37821. uint32x4_t __ret;
  37822. __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
  37823. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37824. return __ret;
  37825. }
  37826. #endif
  37827. #ifdef __LITTLE_ENDIAN__
  37828. __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  37829. uint32x4_t __ret;
  37830. __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  37831. return __ret;
  37832. }
  37833. #else
  37834. __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  37835. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37836. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37837. uint32x4_t __ret;
  37838. __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  37839. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37840. return __ret;
  37841. }
  37842. #endif
  37843. #ifdef __LITTLE_ENDIAN__
  37844. __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  37845. uint32x4_t __ret;
  37846. __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
  37847. return __ret;
  37848. }
  37849. #else
  37850. __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  37851. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37852. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37853. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37854. uint32x4_t __ret;
  37855. __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
  37856. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37857. return __ret;
  37858. }
  37859. #endif
  37860. #endif
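/*
 * The __ARM_FEATURE_CRYPTO block above wraps the AES and SHA instructions:
 *   vaeseq/vaesdq       - one AES encrypt/decrypt round step
 *                         (AddRoundKey, then SubBytes and ShiftRows)
 *   vaesmcq/vaesimcq    - AES MixColumns / InvMixColumns
 *   vsha1c/m/p, vsha1h  - SHA-1 hash-update operations
 *   vsha1su0/1          - SHA-1 message-schedule updates
 *   vsha256h/h2         - SHA-256 hash-update operations
 *   vsha256su0/1        - SHA-256 message-schedule updates
 * Illustrative sketch of one full AES encryption round (user code, not part
 * of the generated header; real code would use genuine state and round keys):
 *
 *   uint8x16_t state = vdupq_n_u8(0);           // placeholder state
 *   uint8x16_t rk    = vdupq_n_u8(0);           // placeholder round key
 *   state = vaesmcq_u8(vaeseq_u8(state, rk));   // AESE, then MixColumns
 */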
  37861. #if defined(__ARM_FEATURE_FMA)
  37862. #ifdef __LITTLE_ENDIAN__
  37863. __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  37864. float32x4_t __ret;
  37865. __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
  37866. return __ret;
  37867. }
  37868. #else
  37869. __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  37870. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37871. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37872. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37873. float32x4_t __ret;
  37874. __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
  37875. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37876. return __ret;
  37877. }
  37878. __ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  37879. float32x4_t __ret;
  37880. __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
  37881. return __ret;
  37882. }
  37883. #endif
  37884. #ifdef __LITTLE_ENDIAN__
  37885. __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  37886. float32x2_t __ret;
  37887. __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
  37888. return __ret;
  37889. }
  37890. #else
  37891. __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  37892. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37893. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  37894. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  37895. float32x2_t __ret;
  37896. __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
  37897. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37898. return __ret;
  37899. }
  37900. __ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  37901. float32x2_t __ret;
  37902. __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
  37903. return __ret;
  37904. }
  37905. #endif
  37906. #ifdef __LITTLE_ENDIAN__
  37907. __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  37908. float32x4_t __ret;
  37909. __ret = vfmaq_f32(__p0, -__p1, __p2);
  37910. return __ret;
  37911. }
  37912. #else
  37913. __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
  37914. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37915. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37916. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  37917. float32x4_t __ret;
  37918. __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
  37919. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37920. return __ret;
  37921. }
  37922. #endif
  37923. #ifdef __LITTLE_ENDIAN__
  37924. __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  37925. float32x2_t __ret;
  37926. __ret = vfma_f32(__p0, -__p1, __p2);
  37927. return __ret;
  37928. }
  37929. #else
  37930. __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
  37931. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  37932. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  37933. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  37934. float32x2_t __ret;
  37935. __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
  37936. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  37937. return __ret;
  37938. }
  37939. #endif
  37940. #endif
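/*
 * The __ARM_FEATURE_FMA block above provides fused multiply-add:
 * vfma[q]_f32(a, b, c) computes a + b*c with a single rounding, and
 * vfms[q]_f32(a, b, c) computes a - b*c (expanded above as vfma with -b).
 * Illustrative sketch of user code (not part of the generated header):
 *
 *   float32x4_t acc = vdupq_n_f32(1.0f);
 *   float32x4_t x   = vdupq_n_f32(2.0f);
 *   float32x4_t y   = vdupq_n_f32(3.0f);
 *   acc = vfmaq_f32(acc, x, y);   // each lane: 1 + 2*3 = 7
 */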
  37941. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
  37942. #ifdef __LITTLE_ENDIAN__
  37943. __ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
  37944. float16x8_t __ret;
  37945. __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  37946. return __ret;
  37947. }
  37948. #else
  37949. __ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
  37950. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  37951. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  37952. float16x8_t __ret;
  37953. __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  37954. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  37955. return __ret;
  37956. }
  37957. #endif
  37958. #ifdef __LITTLE_ENDIAN__
  37959. __ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
  37960. float16x4_t __ret;
  37961. __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  37962. return __ret;
  37963. }
  37964. #else
  37965. __ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
  37966. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37967. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  37968. float16x4_t __ret;
  37969. __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  37970. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  37971. return __ret;
  37972. }
  37973. #endif
  37974. #ifdef __LITTLE_ENDIAN__
  37975. __ai float16x8_t vabsq_f16(float16x8_t __p0) {
  37976. float16x8_t __ret;
  37977. __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
  37978. return __ret;
  37979. }
  37980. #else
  37981. __ai float16x8_t vabsq_f16(float16x8_t __p0) {
  37982. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  37983. float16x8_t __ret;
  37984. __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
  37985. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  37986. return __ret;
  37987. }
  37988. #endif
  37989. #ifdef __LITTLE_ENDIAN__
  37990. __ai float16x4_t vabs_f16(float16x4_t __p0) {
  37991. float16x4_t __ret;
  37992. __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
  37993. return __ret;
  37994. }
  37995. #else
  37996. __ai float16x4_t vabs_f16(float16x4_t __p0) {
  37997. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  37998. float16x4_t __ret;
  37999. __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
  38000. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38001. return __ret;
  38002. }
  38003. #endif
  38004. #ifdef __LITTLE_ENDIAN__
  38005. __ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
  38006. float16x8_t __ret;
  38007. __ret = __p0 + __p1;
  38008. return __ret;
  38009. }
  38010. #else
  38011. __ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
  38012. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38013. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38014. float16x8_t __ret;
  38015. __ret = __rev0 + __rev1;
  38016. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38017. return __ret;
  38018. }
  38019. #endif
  38020. #ifdef __LITTLE_ENDIAN__
  38021. __ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
  38022. float16x4_t __ret;
  38023. __ret = __p0 + __p1;
  38024. return __ret;
  38025. }
  38026. #else
  38027. __ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
  38028. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38029. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38030. float16x4_t __ret;
  38031. __ret = __rev0 + __rev1;
  38032. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38033. return __ret;
  38034. }
  38035. #endif
  38036. #ifdef __LITTLE_ENDIAN__
  38037. __ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  38038. float16x8_t __ret;
  38039. __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
  38040. return __ret;
  38041. }
  38042. #else
  38043. __ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  38044. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38045. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38046. float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  38047. float16x8_t __ret;
  38048. __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
  38049. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38050. return __ret;
  38051. }
  38052. #endif
  38053. #ifdef __LITTLE_ENDIAN__
  38054. __ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  38055. float16x4_t __ret;
  38056. __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
  38057. return __ret;
  38058. }
  38059. #else
  38060. __ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  38061. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38062. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38063. float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  38064. float16x4_t __ret;
  38065. __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
  38066. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38067. return __ret;
  38068. }
  38069. #endif
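/*
 * vbsl[q]_f16 above is a bitwise select: each result bit is taken from the
 * second operand where the corresponding mask bit is 1 and from the third
 * operand where it is 0. Combined with the comparison intrinsics below, this
 * gives lane-wise selection.
 * Illustrative sketch (user code, not part of the generated header; assumes
 * float16x8_t values a and b are already defined):
 *
 *   uint16x8_t  gt = vcgtq_f16(a, b);       // all-ones lanes where a > b
 *   float16x8_t mx = vbslq_f16(gt, a, b);   // per-lane maximum of a and b
 */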
  38070. #ifdef __LITTLE_ENDIAN__
  38071. __ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
  38072. uint16x8_t __ret;
  38073. __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  38074. return __ret;
  38075. }
  38076. #else
  38077. __ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
  38078. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38079. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38080. uint16x8_t __ret;
  38081. __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  38082. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38083. return __ret;
  38084. }
  38085. #endif
  38086. #ifdef __LITTLE_ENDIAN__
  38087. __ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
  38088. uint16x4_t __ret;
  38089. __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  38090. return __ret;
  38091. }
  38092. #else
  38093. __ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
  38094. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38095. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38096. uint16x4_t __ret;
  38097. __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  38098. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38099. return __ret;
  38100. }
  38101. #endif
  38102. #ifdef __LITTLE_ENDIAN__
  38103. __ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
  38104. uint16x8_t __ret;
  38105. __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  38106. return __ret;
  38107. }
  38108. #else
  38109. __ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
  38110. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38111. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38112. uint16x8_t __ret;
  38113. __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  38114. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38115. return __ret;
  38116. }
  38117. #endif
  38118. #ifdef __LITTLE_ENDIAN__
  38119. __ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
  38120. uint16x4_t __ret;
  38121. __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  38122. return __ret;
  38123. }
  38124. #else
  38125. __ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
  38126. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38127. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38128. uint16x4_t __ret;
  38129. __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  38130. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38131. return __ret;
  38132. }
  38133. #endif
  38134. #ifdef __LITTLE_ENDIAN__
  38135. __ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
  38136. uint16x8_t __ret;
  38137. __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  38138. return __ret;
  38139. }
  38140. #else
  38141. __ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
  38142. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38143. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38144. uint16x8_t __ret;
  38145. __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  38146. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38147. return __ret;
  38148. }
  38149. #endif
  38150. #ifdef __LITTLE_ENDIAN__
  38151. __ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
  38152. uint16x4_t __ret;
  38153. __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  38154. return __ret;
  38155. }
  38156. #else
  38157. __ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
  38158. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38159. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38160. uint16x4_t __ret;
  38161. __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  38162. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38163. return __ret;
  38164. }
  38165. #endif
  38166. #ifdef __LITTLE_ENDIAN__
  38167. __ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
  38168. uint16x8_t __ret;
  38169. __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  38170. return __ret;
  38171. }
  38172. #else
  38173. __ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
  38174. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38175. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38176. uint16x8_t __ret;
  38177. __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  38178. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38179. return __ret;
  38180. }
  38181. #endif
  38182. #ifdef __LITTLE_ENDIAN__
  38183. __ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
  38184. uint16x4_t __ret;
  38185. __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  38186. return __ret;
  38187. }
  38188. #else
  38189. __ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
  38190. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38191. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38192. uint16x4_t __ret;
  38193. __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  38194. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38195. return __ret;
  38196. }
  38197. #endif
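/*
 * vcage/vcagt/vcale/vcalt above compare absolute values (|a| >= |b|,
 * |a| > |b|, |a| <= |b|, |a| < |b|). Like the ordinary comparisons that
 * follow, they return a mask vector whose lanes are all ones when the
 * comparison holds and all zeros otherwise.
 */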
  38198. #ifdef __LITTLE_ENDIAN__
  38199. __ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
  38200. uint16x8_t __ret;
  38201. __ret = (uint16x8_t)(__p0 == __p1);
  38202. return __ret;
  38203. }
  38204. #else
  38205. __ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
  38206. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38207. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38208. uint16x8_t __ret;
  38209. __ret = (uint16x8_t)(__rev0 == __rev1);
  38210. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38211. return __ret;
  38212. }
  38213. #endif
  38214. #ifdef __LITTLE_ENDIAN__
  38215. __ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
  38216. uint16x4_t __ret;
  38217. __ret = (uint16x4_t)(__p0 == __p1);
  38218. return __ret;
  38219. }
  38220. #else
  38221. __ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
  38222. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38223. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38224. uint16x4_t __ret;
  38225. __ret = (uint16x4_t)(__rev0 == __rev1);
  38226. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38227. return __ret;
  38228. }
  38229. #endif
  38230. #ifdef __LITTLE_ENDIAN__
  38231. __ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
  38232. uint16x8_t __ret;
  38233. __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
  38234. return __ret;
  38235. }
  38236. #else
  38237. __ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
  38238. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38239. uint16x8_t __ret;
  38240. __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
  38241. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38242. return __ret;
  38243. }
  38244. #endif
  38245. #ifdef __LITTLE_ENDIAN__
  38246. __ai uint16x4_t vceqz_f16(float16x4_t __p0) {
  38247. uint16x4_t __ret;
  38248. __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
  38249. return __ret;
  38250. }
  38251. #else
  38252. __ai uint16x4_t vceqz_f16(float16x4_t __p0) {
  38253. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38254. uint16x4_t __ret;
  38255. __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
  38256. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38257. return __ret;
  38258. }
  38259. #endif
  38260. #ifdef __LITTLE_ENDIAN__
  38261. __ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
  38262. uint16x8_t __ret;
  38263. __ret = (uint16x8_t)(__p0 >= __p1);
  38264. return __ret;
  38265. }
  38266. #else
  38267. __ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
  38268. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38269. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38270. uint16x8_t __ret;
  38271. __ret = (uint16x8_t)(__rev0 >= __rev1);
  38272. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38273. return __ret;
  38274. }
  38275. #endif
  38276. #ifdef __LITTLE_ENDIAN__
  38277. __ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
  38278. uint16x4_t __ret;
  38279. __ret = (uint16x4_t)(__p0 >= __p1);
  38280. return __ret;
  38281. }
  38282. #else
  38283. __ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
  38284. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38285. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38286. uint16x4_t __ret;
  38287. __ret = (uint16x4_t)(__rev0 >= __rev1);
  38288. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38289. return __ret;
  38290. }
  38291. #endif
  38292. #ifdef __LITTLE_ENDIAN__
  38293. __ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
  38294. uint16x8_t __ret;
  38295. __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
  38296. return __ret;
  38297. }
  38298. #else
  38299. __ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
  38300. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38301. uint16x8_t __ret;
  38302. __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
  38303. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38304. return __ret;
  38305. }
  38306. #endif
  38307. #ifdef __LITTLE_ENDIAN__
  38308. __ai uint16x4_t vcgez_f16(float16x4_t __p0) {
  38309. uint16x4_t __ret;
  38310. __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
  38311. return __ret;
  38312. }
  38313. #else
  38314. __ai uint16x4_t vcgez_f16(float16x4_t __p0) {
  38315. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38316. uint16x4_t __ret;
  38317. __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
  38318. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38319. return __ret;
  38320. }
  38321. #endif
  38322. #ifdef __LITTLE_ENDIAN__
  38323. __ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
  38324. uint16x8_t __ret;
  38325. __ret = (uint16x8_t)(__p0 > __p1);
  38326. return __ret;
  38327. }
  38328. #else
  38329. __ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
  38330. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38331. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38332. uint16x8_t __ret;
  38333. __ret = (uint16x8_t)(__rev0 > __rev1);
  38334. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38335. return __ret;
  38336. }
  38337. #endif
  38338. #ifdef __LITTLE_ENDIAN__
  38339. __ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
  38340. uint16x4_t __ret;
  38341. __ret = (uint16x4_t)(__p0 > __p1);
  38342. return __ret;
  38343. }
  38344. #else
  38345. __ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
  38346. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38347. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38348. uint16x4_t __ret;
  38349. __ret = (uint16x4_t)(__rev0 > __rev1);
  38350. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38351. return __ret;
  38352. }
  38353. #endif
  38354. #ifdef __LITTLE_ENDIAN__
  38355. __ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
  38356. uint16x8_t __ret;
  38357. __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
  38358. return __ret;
  38359. }
  38360. #else
  38361. __ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
  38362. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38363. uint16x8_t __ret;
  38364. __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
  38365. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38366. return __ret;
  38367. }
  38368. #endif
  38369. #ifdef __LITTLE_ENDIAN__
  38370. __ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
  38371. uint16x4_t __ret;
  38372. __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
  38373. return __ret;
  38374. }
  38375. #else
  38376. __ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
  38377. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38378. uint16x4_t __ret;
  38379. __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
  38380. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38381. return __ret;
  38382. }
  38383. #endif
  38384. #ifdef __LITTLE_ENDIAN__
  38385. __ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
  38386. uint16x8_t __ret;
  38387. __ret = (uint16x8_t)(__p0 <= __p1);
  38388. return __ret;
  38389. }
  38390. #else
  38391. __ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
  38392. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38393. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38394. uint16x8_t __ret;
  38395. __ret = (uint16x8_t)(__rev0 <= __rev1);
  38396. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38397. return __ret;
  38398. }
  38399. #endif
  38400. #ifdef __LITTLE_ENDIAN__
  38401. __ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
  38402. uint16x4_t __ret;
  38403. __ret = (uint16x4_t)(__p0 <= __p1);
  38404. return __ret;
  38405. }
  38406. #else
  38407. __ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
  38408. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38409. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38410. uint16x4_t __ret;
  38411. __ret = (uint16x4_t)(__rev0 <= __rev1);
  38412. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38413. return __ret;
  38414. }
  38415. #endif
  38416. #ifdef __LITTLE_ENDIAN__
  38417. __ai uint16x8_t vclezq_f16(float16x8_t __p0) {
  38418. uint16x8_t __ret;
  38419. __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
  38420. return __ret;
  38421. }
  38422. #else
  38423. __ai uint16x8_t vclezq_f16(float16x8_t __p0) {
  38424. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38425. uint16x8_t __ret;
  38426. __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
  38427. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38428. return __ret;
  38429. }
  38430. #endif
  38431. #ifdef __LITTLE_ENDIAN__
  38432. __ai uint16x4_t vclez_f16(float16x4_t __p0) {
  38433. uint16x4_t __ret;
  38434. __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
  38435. return __ret;
  38436. }
  38437. #else
  38438. __ai uint16x4_t vclez_f16(float16x4_t __p0) {
  38439. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38440. uint16x4_t __ret;
  38441. __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
  38442. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38443. return __ret;
  38444. }
  38445. #endif
  38446. #ifdef __LITTLE_ENDIAN__
  38447. __ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
  38448. uint16x8_t __ret;
  38449. __ret = (uint16x8_t)(__p0 < __p1);
  38450. return __ret;
  38451. }
  38452. #else
  38453. __ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
  38454. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38455. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  38456. uint16x8_t __ret;
  38457. __ret = (uint16x8_t)(__rev0 < __rev1);
  38458. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38459. return __ret;
  38460. }
  38461. #endif
  38462. #ifdef __LITTLE_ENDIAN__
  38463. __ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
  38464. uint16x4_t __ret;
  38465. __ret = (uint16x4_t)(__p0 < __p1);
  38466. return __ret;
  38467. }
  38468. #else
  38469. __ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
  38470. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38471. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  38472. uint16x4_t __ret;
  38473. __ret = (uint16x4_t)(__rev0 < __rev1);
  38474. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38475. return __ret;
  38476. }
  38477. #endif
  38478. #ifdef __LITTLE_ENDIAN__
  38479. __ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
  38480. uint16x8_t __ret;
  38481. __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
  38482. return __ret;
  38483. }
  38484. #else
  38485. __ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
  38486. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38487. uint16x8_t __ret;
  38488. __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
  38489. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38490. return __ret;
  38491. }
  38492. #endif
  38493. #ifdef __LITTLE_ENDIAN__
  38494. __ai uint16x4_t vcltz_f16(float16x4_t __p0) {
  38495. uint16x4_t __ret;
  38496. __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
  38497. return __ret;
  38498. }
  38499. #else
  38500. __ai uint16x4_t vcltz_f16(float16x4_t __p0) {
  38501. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38502. uint16x4_t __ret;
  38503. __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
  38504. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38505. return __ret;
  38506. }
  38507. #endif
  38508. #ifdef __LITTLE_ENDIAN__
  38509. __ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
  38510. float16x8_t __ret;
  38511. __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
  38512. return __ret;
  38513. }
  38514. #else
  38515. __ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
  38516. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38517. float16x8_t __ret;
  38518. __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
  38519. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38520. return __ret;
  38521. }
  38522. #endif
  38523. #ifdef __LITTLE_ENDIAN__
  38524. __ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
  38525. float16x8_t __ret;
  38526. __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
  38527. return __ret;
  38528. }
  38529. #else
  38530. __ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
  38531. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38532. float16x8_t __ret;
  38533. __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
  38534. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38535. return __ret;
  38536. }
  38537. #endif
  38538. #ifdef __LITTLE_ENDIAN__
  38539. __ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
  38540. float16x4_t __ret;
  38541. __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
  38542. return __ret;
  38543. }
  38544. #else
  38545. __ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
  38546. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38547. float16x4_t __ret;
  38548. __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
  38549. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38550. return __ret;
  38551. }
  38552. #endif
  38553. #ifdef __LITTLE_ENDIAN__
  38554. __ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
  38555. float16x4_t __ret;
  38556. __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
  38557. return __ret;
  38558. }
  38559. #else
  38560. __ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
  38561. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38562. float16x4_t __ret;
  38563. __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
  38564. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38565. return __ret;
  38566. }
  38567. #endif
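/*
 * vcvt*_n_f16_* below are fixed-point to half-precision conversions: the
 * immediate __p1 gives the number of fractional bits, so each integer lane
 * is scaled by 2^-__p1 as it is converted.  A minimal usage sketch
 * (hypothetical values; assumes ARMv8.2-A FP16 support is enabled):
 *
 *   uint16x4_t q8_8 = vdup_n_u16(0x0180);        // 1.5 in Q8.8 fixed point
 *   float16x4_t f   = vcvt_n_f16_u16(q8_8, 8);   // every lane becomes 1.5
 */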
  38568. #ifdef __LITTLE_ENDIAN__
  38569. #define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
  38570. uint16x8_t __s0 = __p0; \
  38571. float16x8_t __ret; \
  38572. __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
  38573. __ret; \
  38574. })
  38575. #else
  38576. #define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
  38577. uint16x8_t __s0 = __p0; \
  38578. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  38579. float16x8_t __ret; \
  38580. __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
  38581. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  38582. __ret; \
  38583. })
  38584. #endif
  38585. #ifdef __LITTLE_ENDIAN__
  38586. #define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
  38587. int16x8_t __s0 = __p0; \
  38588. float16x8_t __ret; \
  38589. __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
  38590. __ret; \
  38591. })
  38592. #else
  38593. #define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
  38594. int16x8_t __s0 = __p0; \
  38595. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  38596. float16x8_t __ret; \
  38597. __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
  38598. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  38599. __ret; \
  38600. })
  38601. #endif
  38602. #ifdef __LITTLE_ENDIAN__
  38603. #define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
  38604. uint16x4_t __s0 = __p0; \
  38605. float16x4_t __ret; \
  38606. __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
  38607. __ret; \
  38608. })
  38609. #else
  38610. #define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
  38611. uint16x4_t __s0 = __p0; \
  38612. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  38613. float16x4_t __ret; \
  38614. __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
  38615. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  38616. __ret; \
  38617. })
  38618. #endif
  38619. #ifdef __LITTLE_ENDIAN__
  38620. #define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
  38621. int16x4_t __s0 = __p0; \
  38622. float16x4_t __ret; \
  38623. __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
  38624. __ret; \
  38625. })
  38626. #else
  38627. #define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
  38628. int16x4_t __s0 = __p0; \
  38629. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  38630. float16x4_t __ret; \
  38631. __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
  38632. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  38633. __ret; \
  38634. })
  38635. #endif
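/*
 * vcvt*_n_s16_f16 / vcvt*_n_u16_f16: the reverse direction, half-precision to
 * fixed point.  Each lane is scaled by 2^__p1 and converted to a saturating
 * 16-bit integer, discarding the remaining fraction toward zero.
 */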
  38636. #ifdef __LITTLE_ENDIAN__
  38637. #define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
  38638. float16x8_t __s0 = __p0; \
  38639. int16x8_t __ret; \
  38640. __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
  38641. __ret; \
  38642. })
  38643. #else
  38644. #define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
  38645. float16x8_t __s0 = __p0; \
  38646. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  38647. int16x8_t __ret; \
  38648. __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
  38649. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  38650. __ret; \
  38651. })
  38652. #endif
  38653. #ifdef __LITTLE_ENDIAN__
  38654. #define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
  38655. float16x4_t __s0 = __p0; \
  38656. int16x4_t __ret; \
  38657. __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
  38658. __ret; \
  38659. })
  38660. #else
  38661. #define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
  38662. float16x4_t __s0 = __p0; \
  38663. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  38664. int16x4_t __ret; \
  38665. __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
  38666. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  38667. __ret; \
  38668. })
  38669. #endif
  38670. #ifdef __LITTLE_ENDIAN__
  38671. #define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
  38672. float16x8_t __s0 = __p0; \
  38673. uint16x8_t __ret; \
  38674. __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
  38675. __ret; \
  38676. })
  38677. #else
  38678. #define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
  38679. float16x8_t __s0 = __p0; \
  38680. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  38681. uint16x8_t __ret; \
  38682. __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
  38683. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  38684. __ret; \
  38685. })
  38686. #endif
  38687. #ifdef __LITTLE_ENDIAN__
  38688. #define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
  38689. float16x4_t __s0 = __p0; \
  38690. uint16x4_t __ret; \
  38691. __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
  38692. __ret; \
  38693. })
  38694. #else
  38695. #define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
  38696. float16x4_t __s0 = __p0; \
  38697. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  38698. uint16x4_t __ret; \
  38699. __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
  38700. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  38701. __ret; \
  38702. })
  38703. #endif
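/*
 * vcvt*_s16_f16 / vcvt*_u16_f16: plain float-to-integer conversion of each
 * half-precision lane, rounding toward zero (truncation) and saturating to
 * the 16-bit range on overflow.
 */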
  38704. #ifdef __LITTLE_ENDIAN__
  38705. __ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
  38706. int16x8_t __ret;
  38707. __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
  38708. return __ret;
  38709. }
  38710. #else
  38711. __ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
  38712. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38713. int16x8_t __ret;
  38714. __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
  38715. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38716. return __ret;
  38717. }
  38718. #endif
  38719. #ifdef __LITTLE_ENDIAN__
  38720. __ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
  38721. int16x4_t __ret;
  38722. __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
  38723. return __ret;
  38724. }
  38725. #else
  38726. __ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
  38727. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38728. int16x4_t __ret;
  38729. __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
  38730. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38731. return __ret;
  38732. }
  38733. #endif
  38734. #ifdef __LITTLE_ENDIAN__
  38735. __ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
  38736. uint16x8_t __ret;
  38737. __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
  38738. return __ret;
  38739. }
  38740. #else
  38741. __ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
  38742. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38743. uint16x8_t __ret;
  38744. __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
  38745. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38746. return __ret;
  38747. }
  38748. #endif
  38749. #ifdef __LITTLE_ENDIAN__
  38750. __ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
  38751. uint16x4_t __ret;
  38752. __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
  38753. return __ret;
  38754. }
  38755. #else
  38756. __ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
  38757. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38758. uint16x4_t __ret;
  38759. __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
  38760. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38761. return __ret;
  38762. }
  38763. #endif
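/*
 * The vcvta*, vcvtm*, vcvtn* and vcvtp* families below also convert
 * half-precision lanes to 16-bit integers, but with an explicit rounding
 * mode instead of truncation: 'a' rounds to nearest with ties away from
 * zero, 'm' rounds toward minus infinity, 'n' rounds to nearest with ties
 * to even, and 'p' rounds toward plus infinity.
 */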
  38764. #ifdef __LITTLE_ENDIAN__
  38765. __ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
  38766. int16x8_t __ret;
  38767. __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
  38768. return __ret;
  38769. }
  38770. #else
  38771. __ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
  38772. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38773. int16x8_t __ret;
  38774. __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
  38775. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38776. return __ret;
  38777. }
  38778. #endif
  38779. #ifdef __LITTLE_ENDIAN__
  38780. __ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
  38781. int16x4_t __ret;
  38782. __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
  38783. return __ret;
  38784. }
  38785. #else
  38786. __ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
  38787. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38788. int16x4_t __ret;
  38789. __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
  38790. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38791. return __ret;
  38792. }
  38793. #endif
  38794. #ifdef __LITTLE_ENDIAN__
  38795. __ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
  38796. uint16x8_t __ret;
  38797. __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
  38798. return __ret;
  38799. }
  38800. #else
  38801. __ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
  38802. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38803. uint16x8_t __ret;
  38804. __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
  38805. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38806. return __ret;
  38807. }
  38808. #endif
  38809. #ifdef __LITTLE_ENDIAN__
  38810. __ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
  38811. uint16x4_t __ret;
  38812. __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
  38813. return __ret;
  38814. }
  38815. #else
  38816. __ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
  38817. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38818. uint16x4_t __ret;
  38819. __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
  38820. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38821. return __ret;
  38822. }
  38823. #endif
  38824. #ifdef __LITTLE_ENDIAN__
  38825. __ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
  38826. int16x8_t __ret;
  38827. __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
  38828. return __ret;
  38829. }
  38830. #else
  38831. __ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
  38832. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38833. int16x8_t __ret;
  38834. __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
  38835. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38836. return __ret;
  38837. }
  38838. #endif
  38839. #ifdef __LITTLE_ENDIAN__
  38840. __ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
  38841. int16x4_t __ret;
  38842. __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
  38843. return __ret;
  38844. }
  38845. #else
  38846. __ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
  38847. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38848. int16x4_t __ret;
  38849. __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
  38850. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38851. return __ret;
  38852. }
  38853. #endif
  38854. #ifdef __LITTLE_ENDIAN__
  38855. __ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
  38856. uint16x8_t __ret;
  38857. __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
  38858. return __ret;
  38859. }
  38860. #else
  38861. __ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
  38862. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38863. uint16x8_t __ret;
  38864. __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
  38865. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38866. return __ret;
  38867. }
  38868. #endif
  38869. #ifdef __LITTLE_ENDIAN__
  38870. __ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
  38871. uint16x4_t __ret;
  38872. __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
  38873. return __ret;
  38874. }
  38875. #else
  38876. __ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
  38877. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38878. uint16x4_t __ret;
  38879. __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
  38880. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38881. return __ret;
  38882. }
  38883. #endif
  38884. #ifdef __LITTLE_ENDIAN__
  38885. __ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
  38886. int16x8_t __ret;
  38887. __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
  38888. return __ret;
  38889. }
  38890. #else
  38891. __ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
  38892. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38893. int16x8_t __ret;
  38894. __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
  38895. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38896. return __ret;
  38897. }
  38898. #endif
  38899. #ifdef __LITTLE_ENDIAN__
  38900. __ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
  38901. int16x4_t __ret;
  38902. __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
  38903. return __ret;
  38904. }
  38905. #else
  38906. __ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
  38907. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38908. int16x4_t __ret;
  38909. __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
  38910. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38911. return __ret;
  38912. }
  38913. #endif
  38914. #ifdef __LITTLE_ENDIAN__
  38915. __ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
  38916. uint16x8_t __ret;
  38917. __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
  38918. return __ret;
  38919. }
  38920. #else
  38921. __ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
  38922. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38923. uint16x8_t __ret;
  38924. __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
  38925. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38926. return __ret;
  38927. }
  38928. #endif
  38929. #ifdef __LITTLE_ENDIAN__
  38930. __ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
  38931. uint16x4_t __ret;
  38932. __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
  38933. return __ret;
  38934. }
  38935. #else
  38936. __ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
  38937. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38938. uint16x4_t __ret;
  38939. __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
  38940. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38941. return __ret;
  38942. }
  38943. #endif
  38944. #ifdef __LITTLE_ENDIAN__
  38945. __ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
  38946. int16x8_t __ret;
  38947. __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
  38948. return __ret;
  38949. }
  38950. #else
  38951. __ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
  38952. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38953. int16x8_t __ret;
  38954. __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
  38955. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38956. return __ret;
  38957. }
  38958. #endif
  38959. #ifdef __LITTLE_ENDIAN__
  38960. __ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
  38961. int16x4_t __ret;
  38962. __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
  38963. return __ret;
  38964. }
  38965. #else
  38966. __ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
  38967. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38968. int16x4_t __ret;
  38969. __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
  38970. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  38971. return __ret;
  38972. }
  38973. #endif
  38974. #ifdef __LITTLE_ENDIAN__
  38975. __ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
  38976. uint16x8_t __ret;
  38977. __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
  38978. return __ret;
  38979. }
  38980. #else
  38981. __ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
  38982. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  38983. uint16x8_t __ret;
  38984. __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
  38985. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  38986. return __ret;
  38987. }
  38988. #endif
  38989. #ifdef __LITTLE_ENDIAN__
  38990. __ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
  38991. uint16x4_t __ret;
  38992. __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
  38993. return __ret;
  38994. }
  38995. #else
  38996. __ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
  38997. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  38998. uint16x4_t __ret;
  38999. __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
  39000. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39001. return __ret;
  39002. }
  39003. #endif
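/*
 * vdivq_f16 / vdiv_f16: lanewise half-precision division, expressed directly
 * with the vector '/' operator; only the big-endian variant needs the usual
 * lane reversal around the operation.
 */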
  39004. #ifdef __LITTLE_ENDIAN__
  39005. __ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
  39006. float16x8_t __ret;
  39007. __ret = __p0 / __p1;
  39008. return __ret;
  39009. }
  39010. #else
  39011. __ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
  39012. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  39013. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  39014. float16x8_t __ret;
  39015. __ret = __rev0 / __rev1;
  39016. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  39017. return __ret;
  39018. }
  39019. #endif
  39020. #ifdef __LITTLE_ENDIAN__
  39021. __ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
  39022. float16x4_t __ret;
  39023. __ret = __p0 / __p1;
  39024. return __ret;
  39025. }
  39026. #else
  39027. __ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
  39028. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  39029. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  39030. float16x4_t __ret;
  39031. __ret = __rev0 / __rev1;
  39032. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39033. return __ret;
  39034. }
  39035. #endif
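/*
 * vduph_lane_f16 / vduph_laneq_f16: read lane __p1 of a 64-bit or 128-bit
 * half-precision vector and return it as a scalar float16_t.
 */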
  39036. #ifdef __LITTLE_ENDIAN__
  39037. #define vduph_lane_f16(__p0, __p1) __extension__ ({ \
  39038. float16x4_t __s0 = __p0; \
  39039. float16_t __ret; \
  39040. __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__s0, __p1); \
  39041. __ret; \
  39042. })
  39043. #else
  39044. #define vduph_lane_f16(__p0, __p1) __extension__ ({ \
  39045. float16x4_t __s0 = __p0; \
  39046. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39047. float16_t __ret; \
  39048. __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__rev0, __p1); \
  39049. __ret; \
  39050. })
  39051. #endif
  39052. #ifdef __LITTLE_ENDIAN__
  39053. #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
  39054. float16x8_t __s0 = __p0; \
  39055. float16_t __ret; \
  39056. __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__s0, __p1); \
  39057. __ret; \
  39058. })
  39059. #else
  39060. #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
  39061. float16x8_t __s0 = __p0; \
  39062. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39063. float16_t __ret; \
  39064. __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__rev0, __p1); \
  39065. __ret; \
  39066. })
  39067. #endif
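/*
 * vextq_f16 / vext_f16: extract a vector from a pair of vectors.  The result
 * takes the upper lanes of the first operand starting at lane __p2, followed
 * by the lower lanes of the second operand, as if the two inputs were
 * concatenated and one vector's width were read back at offset __p2.
 */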
  39068. #ifdef __LITTLE_ENDIAN__
  39069. #define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
  39070. float16x8_t __s0 = __p0; \
  39071. float16x8_t __s1 = __p1; \
  39072. float16x8_t __ret; \
  39073. __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
  39074. __ret; \
  39075. })
  39076. #else
  39077. #define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
  39078. float16x8_t __s0 = __p0; \
  39079. float16x8_t __s1 = __p1; \
  39080. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39081. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  39082. float16x8_t __ret; \
  39083. __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
  39084. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  39085. __ret; \
  39086. })
  39087. #endif
  39088. #ifdef __LITTLE_ENDIAN__
  39089. #define vext_f16(__p0, __p1, __p2) __extension__ ({ \
  39090. float16x4_t __s0 = __p0; \
  39091. float16x4_t __s1 = __p1; \
  39092. float16x4_t __ret; \
  39093. __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
  39094. __ret; \
  39095. })
  39096. #else
  39097. #define vext_f16(__p0, __p1, __p2) __extension__ ({ \
  39098. float16x4_t __s0 = __p0; \
  39099. float16x4_t __s1 = __p1; \
  39100. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39101. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  39102. float16x4_t __ret; \
  39103. __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
  39104. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  39105. __ret; \
  39106. })
  39107. #endif
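/*
 * vfmaq_f16 / vfma_f16: fused multiply-add.  vfmaq_f16(a, b, c) returns
 * a + b * c in every lane with a single rounding.  The __noswap_ helpers in
 * the big-endian branch are internal: they perform the same operation without
 * the lane reversal, so composite intrinsics further down can reverse their
 * operands once, call the helper, and reverse the result once.
 * A minimal usage sketch (hypothetical vectors x and y; assumes FP16 support):
 *
 *   float16x8_t acc = vdupq_n_f16(0);
 *   acc = vfmaq_f16(acc, x, y);   // acc += x * y, lane by lane
 */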
  39108. #ifdef __LITTLE_ENDIAN__
  39109. __ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  39110. float16x8_t __ret;
  39111. __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
  39112. return __ret;
  39113. }
  39114. #else
  39115. __ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  39116. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  39117. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  39118. float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  39119. float16x8_t __ret;
  39120. __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
  39121. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  39122. return __ret;
  39123. }
  39124. __ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  39125. float16x8_t __ret;
  39126. __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
  39127. return __ret;
  39128. }
  39129. #endif
  39130. #ifdef __LITTLE_ENDIAN__
  39131. __ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  39132. float16x4_t __ret;
  39133. __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
  39134. return __ret;
  39135. }
  39136. #else
  39137. __ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  39138. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  39139. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  39140. float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  39141. float16x4_t __ret;
  39142. __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
  39143. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39144. return __ret;
  39145. }
  39146. __ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  39147. float16x4_t __ret;
  39148. __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
  39149. return __ret;
  39150. }
  39151. #endif
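/*
 * The _lane and _laneq forms of the FMA intrinsics below multiply by a single
 * broadcast lane of the third operand; __p3 selects the lane, and the _laneq
 * forms take a 128-bit lane source.  Each macro also has a __noswap_ twin
 * that skips the lane reversal, for internal use by the vfms* wrappers.
 */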
  39152. #ifdef __LITTLE_ENDIAN__
  39153. #define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39154. float16_t __s0 = __p0; \
  39155. float16_t __s1 = __p1; \
  39156. float16x4_t __s2 = __p2; \
  39157. float16_t __ret; \
  39158. __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
  39159. __ret; \
  39160. })
  39161. #else
  39162. #define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39163. float16_t __s0 = __p0; \
  39164. float16_t __s1 = __p1; \
  39165. float16x4_t __s2 = __p2; \
  39166. float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  39167. float16_t __ret; \
  39168. __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__rev2, __p3); \
  39169. __ret; \
  39170. })
  39171. #define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39172. float16_t __s0 = __p0; \
  39173. float16_t __s1 = __p1; \
  39174. float16x4_t __s2 = __p2; \
  39175. float16_t __ret; \
  39176. __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
  39177. __ret; \
  39178. })
  39179. #endif
  39180. #ifdef __LITTLE_ENDIAN__
  39181. #define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39182. float16x8_t __s0 = __p0; \
  39183. float16x8_t __s1 = __p1; \
  39184. float16x4_t __s2 = __p2; \
  39185. float16x8_t __ret; \
  39186. __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
  39187. __ret; \
  39188. })
  39189. #else
  39190. #define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39191. float16x8_t __s0 = __p0; \
  39192. float16x8_t __s1 = __p1; \
  39193. float16x4_t __s2 = __p2; \
  39194. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39195. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  39196. float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  39197. float16x8_t __ret; \
  39198. __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
  39199. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  39200. __ret; \
  39201. })
  39202. #define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39203. float16x8_t __s0 = __p0; \
  39204. float16x8_t __s1 = __p1; \
  39205. float16x4_t __s2 = __p2; \
  39206. float16x8_t __ret; \
  39207. __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
  39208. __ret; \
  39209. })
  39210. #endif
  39211. #ifdef __LITTLE_ENDIAN__
  39212. #define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39213. float16x4_t __s0 = __p0; \
  39214. float16x4_t __s1 = __p1; \
  39215. float16x4_t __s2 = __p2; \
  39216. float16x4_t __ret; \
  39217. __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
  39218. __ret; \
  39219. })
  39220. #else
  39221. #define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39222. float16x4_t __s0 = __p0; \
  39223. float16x4_t __s1 = __p1; \
  39224. float16x4_t __s2 = __p2; \
  39225. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39226. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  39227. float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  39228. float16x4_t __ret; \
  39229. __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
  39230. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  39231. __ret; \
  39232. })
  39233. #define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39234. float16x4_t __s0 = __p0; \
  39235. float16x4_t __s1 = __p1; \
  39236. float16x4_t __s2 = __p2; \
  39237. float16x4_t __ret; \
  39238. __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
  39239. __ret; \
  39240. })
  39241. #endif
  39242. #ifdef __LITTLE_ENDIAN__
  39243. #define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39244. float16_t __s0 = __p0; \
  39245. float16_t __s1 = __p1; \
  39246. float16x8_t __s2 = __p2; \
  39247. float16_t __ret; \
  39248. __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
  39249. __ret; \
  39250. })
  39251. #else
  39252. #define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39253. float16_t __s0 = __p0; \
  39254. float16_t __s1 = __p1; \
  39255. float16x8_t __s2 = __p2; \
  39256. float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  39257. float16_t __ret; \
  39258. __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__rev2, __p3); \
  39259. __ret; \
  39260. })
  39261. #define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39262. float16_t __s0 = __p0; \
  39263. float16_t __s1 = __p1; \
  39264. float16x8_t __s2 = __p2; \
  39265. float16_t __ret; \
  39266. __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
  39267. __ret; \
  39268. })
  39269. #endif
  39270. #ifdef __LITTLE_ENDIAN__
  39271. #define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39272. float16x8_t __s0 = __p0; \
  39273. float16x8_t __s1 = __p1; \
  39274. float16x8_t __s2 = __p2; \
  39275. float16x8_t __ret; \
  39276. __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
  39277. __ret; \
  39278. })
  39279. #else
  39280. #define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39281. float16x8_t __s0 = __p0; \
  39282. float16x8_t __s1 = __p1; \
  39283. float16x8_t __s2 = __p2; \
  39284. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39285. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  39286. float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  39287. float16x8_t __ret; \
  39288. __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
  39289. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  39290. __ret; \
  39291. })
  39292. #define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39293. float16x8_t __s0 = __p0; \
  39294. float16x8_t __s1 = __p1; \
  39295. float16x8_t __s2 = __p2; \
  39296. float16x8_t __ret; \
  39297. __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
  39298. __ret; \
  39299. })
  39300. #endif
  39301. #ifdef __LITTLE_ENDIAN__
  39302. #define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39303. float16x4_t __s0 = __p0; \
  39304. float16x4_t __s1 = __p1; \
  39305. float16x8_t __s2 = __p2; \
  39306. float16x4_t __ret; \
  39307. __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
  39308. __ret; \
  39309. })
  39310. #else
  39311. #define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39312. float16x4_t __s0 = __p0; \
  39313. float16x4_t __s1 = __p1; \
  39314. float16x8_t __s2 = __p2; \
  39315. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39316. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  39317. float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  39318. float16x4_t __ret; \
  39319. __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
  39320. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  39321. __ret; \
  39322. })
  39323. #define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
  39324. float16x4_t __s0 = __p0; \
  39325. float16x4_t __s1 = __p1; \
  39326. float16x8_t __s2 = __p2; \
  39327. float16x4_t __ret; \
  39328. __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
  39329. __ret; \
  39330. })
  39331. #endif
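/*
 * vfmaq_n_f16 / vfma_n_f16: multiply-accumulate with a scalar.  The scalar
 * __p2 is splatted into a vector literal and the work is forwarded to
 * vfmaq_f16 / vfma_f16 (or their __noswap_ forms on big-endian targets).
 */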
  39332. #ifdef __LITTLE_ENDIAN__
  39333. #define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
  39334. float16x8_t __s0 = __p0; \
  39335. float16x8_t __s1 = __p1; \
  39336. float16_t __s2 = __p2; \
  39337. float16x8_t __ret; \
  39338. __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
  39339. __ret; \
  39340. })
  39341. #else
  39342. #define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
  39343. float16x8_t __s0 = __p0; \
  39344. float16x8_t __s1 = __p1; \
  39345. float16_t __s2 = __p2; \
  39346. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39347. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  39348. float16x8_t __ret; \
  39349. __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
  39350. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  39351. __ret; \
  39352. })
  39353. #endif
  39354. #ifdef __LITTLE_ENDIAN__
  39355. #define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
  39356. float16x4_t __s0 = __p0; \
  39357. float16x4_t __s1 = __p1; \
  39358. float16_t __s2 = __p2; \
  39359. float16x4_t __ret; \
  39360. __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
  39361. __ret; \
  39362. })
  39363. #else
  39364. #define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
  39365. float16x4_t __s0 = __p0; \
  39366. float16x4_t __s1 = __p1; \
  39367. float16_t __s2 = __p2; \
  39368. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39369. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  39370. float16x4_t __ret; \
  39371. __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
  39372. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  39373. __ret; \
  39374. })
  39375. #endif
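/*
 * vfmsq_f16 / vfms_f16 and the _lane, _laneq and _n variants that follow are
 * fused multiply-subtract: a - b * c, implemented by negating the second
 * operand and reusing the corresponding vfma* form.
 */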
  39376. #ifdef __LITTLE_ENDIAN__
  39377. __ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  39378. float16x8_t __ret;
  39379. __ret = vfmaq_f16(__p0, -__p1, __p2);
  39380. return __ret;
  39381. }
  39382. #else
  39383. __ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
  39384. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  39385. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  39386. float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  39387. float16x8_t __ret;
  39388. __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
  39389. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  39390. return __ret;
  39391. }
  39392. #endif
  39393. #ifdef __LITTLE_ENDIAN__
  39394. __ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  39395. float16x4_t __ret;
  39396. __ret = vfma_f16(__p0, -__p1, __p2);
  39397. return __ret;
  39398. }
  39399. #else
  39400. __ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
  39401. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  39402. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  39403. float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  39404. float16x4_t __ret;
  39405. __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
  39406. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39407. return __ret;
  39408. }
  39409. #endif
  39410. #ifdef __LITTLE_ENDIAN__
  39411. #define vfmsh_lane_f16(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \
  39412. float16_t __s0_0 = __p0_0; \
  39413. float16_t __s1_0 = __p1_0; \
  39414. float16x4_t __s2_0 = __p2_0; \
  39415. float16_t __ret_0; \
  39416. __ret_0 = vfmah_lane_f16(__s0_0, -__s1_0, __s2_0, __p3_0); \
  39417. __ret_0; \
  39418. })
  39419. #else
  39420. #define vfmsh_lane_f16(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \
  39421. float16_t __s0_1 = __p0_1; \
  39422. float16_t __s1_1 = __p1_1; \
  39423. float16x4_t __s2_1 = __p2_1; \
  39424. float16x4_t __rev2_1; __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 3, 2, 1, 0); \
  39425. float16_t __ret_1; \
  39426. __ret_1 = __noswap_vfmah_lane_f16(__s0_1, -__s1_1, __rev2_1, __p3_1); \
  39427. __ret_1; \
  39428. })
  39429. #endif
  39430. #ifdef __LITTLE_ENDIAN__
  39431. #define vfmsq_lane_f16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \
  39432. float16x8_t __s0_2 = __p0_2; \
  39433. float16x8_t __s1_2 = __p1_2; \
  39434. float16x4_t __s2_2 = __p2_2; \
  39435. float16x8_t __ret_2; \
  39436. __ret_2 = vfmaq_lane_f16(__s0_2, -__s1_2, __s2_2, __p3_2); \
  39437. __ret_2; \
  39438. })
  39439. #else
  39440. #define vfmsq_lane_f16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \
  39441. float16x8_t __s0_3 = __p0_3; \
  39442. float16x8_t __s1_3 = __p1_3; \
  39443. float16x4_t __s2_3 = __p2_3; \
  39444. float16x8_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \
  39445. float16x8_t __rev1_3; __rev1_3 = __builtin_shufflevector(__s1_3, __s1_3, 7, 6, 5, 4, 3, 2, 1, 0); \
  39446. float16x4_t __rev2_3; __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \
  39447. float16x8_t __ret_3; \
  39448. __ret_3 = __noswap_vfmaq_lane_f16(__rev0_3, -__rev1_3, __rev2_3, __p3_3); \
  39449. __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \
  39450. __ret_3; \
  39451. })
  39452. #endif
  39453. #ifdef __LITTLE_ENDIAN__
  39454. #define vfms_lane_f16(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \
  39455. float16x4_t __s0_4 = __p0_4; \
  39456. float16x4_t __s1_4 = __p1_4; \
  39457. float16x4_t __s2_4 = __p2_4; \
  39458. float16x4_t __ret_4; \
  39459. __ret_4 = vfma_lane_f16(__s0_4, -__s1_4, __s2_4, __p3_4); \
  39460. __ret_4; \
  39461. })
  39462. #else
  39463. #define vfms_lane_f16(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \
  39464. float16x4_t __s0_5 = __p0_5; \
  39465. float16x4_t __s1_5 = __p1_5; \
  39466. float16x4_t __s2_5 = __p2_5; \
  39467. float16x4_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 3, 2, 1, 0); \
  39468. float16x4_t __rev1_5; __rev1_5 = __builtin_shufflevector(__s1_5, __s1_5, 3, 2, 1, 0); \
  39469. float16x4_t __rev2_5; __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 3, 2, 1, 0); \
  39470. float16x4_t __ret_5; \
  39471. __ret_5 = __noswap_vfma_lane_f16(__rev0_5, -__rev1_5, __rev2_5, __p3_5); \
  39472. __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 3, 2, 1, 0); \
  39473. __ret_5; \
  39474. })
  39475. #endif
  39476. #ifdef __LITTLE_ENDIAN__
  39477. #define vfmsh_laneq_f16(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \
  39478. float16_t __s0_6 = __p0_6; \
  39479. float16_t __s1_6 = __p1_6; \
  39480. float16x8_t __s2_6 = __p2_6; \
  39481. float16_t __ret_6; \
  39482. __ret_6 = vfmah_laneq_f16(__s0_6, -__s1_6, __s2_6, __p3_6); \
  39483. __ret_6; \
  39484. })
  39485. #else
  39486. #define vfmsh_laneq_f16(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \
  39487. float16_t __s0_7 = __p0_7; \
  39488. float16_t __s1_7 = __p1_7; \
  39489. float16x8_t __s2_7 = __p2_7; \
  39490. float16x8_t __rev2_7; __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 7, 6, 5, 4, 3, 2, 1, 0); \
  39491. float16_t __ret_7; \
  39492. __ret_7 = __noswap_vfmah_laneq_f16(__s0_7, -__s1_7, __rev2_7, __p3_7); \
  39493. __ret_7; \
  39494. })
  39495. #endif
  39496. #ifdef __LITTLE_ENDIAN__
  39497. #define vfmsq_laneq_f16(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
  39498. float16x8_t __s0_8 = __p0_8; \
  39499. float16x8_t __s1_8 = __p1_8; \
  39500. float16x8_t __s2_8 = __p2_8; \
  39501. float16x8_t __ret_8; \
  39502. __ret_8 = vfmaq_laneq_f16(__s0_8, -__s1_8, __s2_8, __p3_8); \
  39503. __ret_8; \
  39504. })
  39505. #else
  39506. #define vfmsq_laneq_f16(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
  39507. float16x8_t __s0_9 = __p0_9; \
  39508. float16x8_t __s1_9 = __p1_9; \
  39509. float16x8_t __s2_9 = __p2_9; \
  39510. float16x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
  39511. float16x8_t __rev1_9; __rev1_9 = __builtin_shufflevector(__s1_9, __s1_9, 7, 6, 5, 4, 3, 2, 1, 0); \
  39512. float16x8_t __rev2_9; __rev2_9 = __builtin_shufflevector(__s2_9, __s2_9, 7, 6, 5, 4, 3, 2, 1, 0); \
  39513. float16x8_t __ret_9; \
  39514. __ret_9 = __noswap_vfmaq_laneq_f16(__rev0_9, -__rev1_9, __rev2_9, __p3_9); \
  39515. __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \
  39516. __ret_9; \
  39517. })
  39518. #endif
  39519. #ifdef __LITTLE_ENDIAN__
  39520. #define vfms_laneq_f16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
  39521. float16x4_t __s0_10 = __p0_10; \
  39522. float16x4_t __s1_10 = __p1_10; \
  39523. float16x8_t __s2_10 = __p2_10; \
  39524. float16x4_t __ret_10; \
  39525. __ret_10 = vfma_laneq_f16(__s0_10, -__s1_10, __s2_10, __p3_10); \
  39526. __ret_10; \
  39527. })
  39528. #else
  39529. #define vfms_laneq_f16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
  39530. float16x4_t __s0_11 = __p0_11; \
  39531. float16x4_t __s1_11 = __p1_11; \
  39532. float16x8_t __s2_11 = __p2_11; \
  39533. float16x4_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \
  39534. float16x4_t __rev1_11; __rev1_11 = __builtin_shufflevector(__s1_11, __s1_11, 3, 2, 1, 0); \
  39535. float16x8_t __rev2_11; __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 7, 6, 5, 4, 3, 2, 1, 0); \
  39536. float16x4_t __ret_11; \
  39537. __ret_11 = __noswap_vfma_laneq_f16(__rev0_11, -__rev1_11, __rev2_11, __p3_11); \
  39538. __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
  39539. __ret_11; \
  39540. })
  39541. #endif
  39542. #ifdef __LITTLE_ENDIAN__
  39543. #define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
  39544. float16x8_t __s0 = __p0; \
  39545. float16x8_t __s1 = __p1; \
  39546. float16_t __s2 = __p2; \
  39547. float16x8_t __ret; \
  39548. __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
  39549. __ret; \
  39550. })
  39551. #else
  39552. #define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
  39553. float16x8_t __s0 = __p0; \
  39554. float16x8_t __s1 = __p1; \
  39555. float16_t __s2 = __p2; \
  39556. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39557. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  39558. float16x8_t __ret; \
  39559. __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
  39560. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  39561. __ret; \
  39562. })
  39563. #endif
  39564. #ifdef __LITTLE_ENDIAN__
  39565. #define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
  39566. float16x4_t __s0 = __p0; \
  39567. float16x4_t __s1 = __p1; \
  39568. float16_t __s2 = __p2; \
  39569. float16x4_t __ret; \
  39570. __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
  39571. __ret; \
  39572. })
  39573. #else
  39574. #define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
  39575. float16x4_t __s0 = __p0; \
  39576. float16x4_t __s1 = __p1; \
  39577. float16_t __s2 = __p2; \
  39578. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39579. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  39580. float16x4_t __ret; \
  39581. __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
  39582. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  39583. __ret; \
  39584. })
  39585. #endif
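/*
 * vmaxq_f16 / vmax_f16 return the lanewise maximum.  The vmaxnm* forms that
 * follow use the IEEE 754-2008 maxNum rule, so when exactly one of the two
 * lanes is a NaN the non-NaN value is returned.  vmaxvq_f16 / vmaxv_f16 and
 * vmaxnmvq_f16 / vmaxnmv_f16 reduce across all lanes to a single float16_t.
 */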
  39586. #ifdef __LITTLE_ENDIAN__
  39587. __ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
  39588. float16x8_t __ret;
  39589. __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  39590. return __ret;
  39591. }
  39592. #else
  39593. __ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
  39594. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  39595. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  39596. float16x8_t __ret;
  39597. __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  39598. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  39599. return __ret;
  39600. }
  39601. #endif
  39602. #ifdef __LITTLE_ENDIAN__
  39603. __ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
  39604. float16x4_t __ret;
  39605. __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  39606. return __ret;
  39607. }
  39608. #else
  39609. __ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
  39610. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  39611. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  39612. float16x4_t __ret;
  39613. __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  39614. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39615. return __ret;
  39616. }
  39617. #endif
  39618. #ifdef __LITTLE_ENDIAN__
  39619. __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  39620. float16x8_t __ret;
  39621. __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  39622. return __ret;
  39623. }
  39624. #else
  39625. __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  39626. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  39627. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  39628. float16x8_t __ret;
  39629. __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  39630. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  39631. return __ret;
  39632. }
  39633. #endif
  39634. #ifdef __LITTLE_ENDIAN__
  39635. __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
  39636. float16x4_t __ret;
  39637. __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  39638. return __ret;
  39639. }
  39640. #else
  39641. __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
  39642. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  39643. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  39644. float16x4_t __ret;
  39645. __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  39646. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39647. return __ret;
  39648. }
  39649. #endif
  39650. #ifdef __LITTLE_ENDIAN__
  39651. #define vmaxnmvq_f16(__p0) __extension__ ({ \
  39652. float16x8_t __s0 = __p0; \
  39653. float16_t __ret; \
  39654. __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
  39655. __ret; \
  39656. })
  39657. #else
  39658. #define vmaxnmvq_f16(__p0) __extension__ ({ \
  39659. float16x8_t __s0 = __p0; \
  39660. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39661. float16_t __ret; \
  39662. __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
  39663. __ret; \
  39664. })
  39665. #endif
  39666. #ifdef __LITTLE_ENDIAN__
  39667. #define vmaxnmv_f16(__p0) __extension__ ({ \
  39668. float16x4_t __s0 = __p0; \
  39669. float16_t __ret; \
  39670. __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
  39671. __ret; \
  39672. })
  39673. #else
  39674. #define vmaxnmv_f16(__p0) __extension__ ({ \
  39675. float16x4_t __s0 = __p0; \
  39676. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39677. float16_t __ret; \
  39678. __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
  39679. __ret; \
  39680. })
  39681. #endif
  39682. #ifdef __LITTLE_ENDIAN__
  39683. #define vmaxvq_f16(__p0) __extension__ ({ \
  39684. float16x8_t __s0 = __p0; \
  39685. float16_t __ret; \
  39686. __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
  39687. __ret; \
  39688. })
  39689. #else
  39690. #define vmaxvq_f16(__p0) __extension__ ({ \
  39691. float16x8_t __s0 = __p0; \
  39692. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39693. float16_t __ret; \
  39694. __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
  39695. __ret; \
  39696. })
  39697. #endif
  39698. #ifdef __LITTLE_ENDIAN__
  39699. #define vmaxv_f16(__p0) __extension__ ({ \
  39700. float16x4_t __s0 = __p0; \
  39701. float16_t __ret; \
  39702. __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
  39703. __ret; \
  39704. })
  39705. #else
  39706. #define vmaxv_f16(__p0) __extension__ ({ \
  39707. float16x4_t __s0 = __p0; \
  39708. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39709. float16_t __ret; \
  39710. __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
  39711. __ret; \
  39712. })
  39713. #endif
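/*
 * vminq_f16 / vmin_f16, vminnm*, vminnmv* and vminv* mirror the vmax family
 * above: lanewise minimum, IEEE minNum semantics for the nm forms, and
 * across-lane reductions for the v forms.
 */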
  39714. #ifdef __LITTLE_ENDIAN__
  39715. __ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
  39716. float16x8_t __ret;
  39717. __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  39718. return __ret;
  39719. }
  39720. #else
  39721. __ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
  39722. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  39723. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  39724. float16x8_t __ret;
  39725. __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  39726. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  39727. return __ret;
  39728. }
  39729. #endif
  39730. #ifdef __LITTLE_ENDIAN__
  39731. __ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
  39732. float16x4_t __ret;
  39733. __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  39734. return __ret;
  39735. }
  39736. #else
  39737. __ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
  39738. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  39739. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  39740. float16x4_t __ret;
  39741. __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  39742. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39743. return __ret;
  39744. }
  39745. #endif
  39746. #ifdef __LITTLE_ENDIAN__
  39747. __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  39748. float16x8_t __ret;
  39749. __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  39750. return __ret;
  39751. }
  39752. #else
  39753. __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  39754. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  39755. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  39756. float16x8_t __ret;
  39757. __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  39758. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  39759. return __ret;
  39760. }
  39761. #endif
  39762. #ifdef __LITTLE_ENDIAN__
  39763. __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
  39764. float16x4_t __ret;
  39765. __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  39766. return __ret;
  39767. }
  39768. #else
  39769. __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
  39770. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  39771. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  39772. float16x4_t __ret;
  39773. __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  39774. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39775. return __ret;
  39776. }
  39777. #endif
  39778. #ifdef __LITTLE_ENDIAN__
  39779. #define vminnmvq_f16(__p0) __extension__ ({ \
  39780. float16x8_t __s0 = __p0; \
  39781. float16_t __ret; \
  39782. __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
  39783. __ret; \
  39784. })
  39785. #else
  39786. #define vminnmvq_f16(__p0) __extension__ ({ \
  39787. float16x8_t __s0 = __p0; \
  39788. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39789. float16_t __ret; \
  39790. __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
  39791. __ret; \
  39792. })
  39793. #endif
  39794. #ifdef __LITTLE_ENDIAN__
  39795. #define vminnmv_f16(__p0) __extension__ ({ \
  39796. float16x4_t __s0 = __p0; \
  39797. float16_t __ret; \
  39798. __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
  39799. __ret; \
  39800. })
  39801. #else
  39802. #define vminnmv_f16(__p0) __extension__ ({ \
  39803. float16x4_t __s0 = __p0; \
  39804. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39805. float16_t __ret; \
  39806. __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
  39807. __ret; \
  39808. })
  39809. #endif
  39810. #ifdef __LITTLE_ENDIAN__
  39811. #define vminvq_f16(__p0) __extension__ ({ \
  39812. float16x8_t __s0 = __p0; \
  39813. float16_t __ret; \
  39814. __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
  39815. __ret; \
  39816. })
  39817. #else
  39818. #define vminvq_f16(__p0) __extension__ ({ \
  39819. float16x8_t __s0 = __p0; \
  39820. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39821. float16_t __ret; \
  39822. __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
  39823. __ret; \
  39824. })
  39825. #endif
  39826. #ifdef __LITTLE_ENDIAN__
  39827. #define vminv_f16(__p0) __extension__ ({ \
  39828. float16x4_t __s0 = __p0; \
  39829. float16_t __ret; \
  39830. __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
  39831. __ret; \
  39832. })
  39833. #else
  39834. #define vminv_f16(__p0) __extension__ ({ \
  39835. float16x4_t __s0 = __p0; \
  39836. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39837. float16_t __ret; \
  39838. __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
  39839. __ret; \
  39840. })
  39841. #endif
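/* vmul_f16 / vmulq_f16: plain lane-wise multiplication, written directly with
 * the vector * operator; only the lane-reversal wrappers differ per endianness. */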
  39842. #ifdef __LITTLE_ENDIAN__
  39843. __ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
  39844. float16x8_t __ret;
  39845. __ret = __p0 * __p1;
  39846. return __ret;
  39847. }
  39848. #else
  39849. __ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
  39850. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  39851. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  39852. float16x8_t __ret;
  39853. __ret = __rev0 * __rev1;
  39854. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  39855. return __ret;
  39856. }
  39857. #endif
  39858. #ifdef __LITTLE_ENDIAN__
  39859. __ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
  39860. float16x4_t __ret;
  39861. __ret = __p0 * __p1;
  39862. return __ret;
  39863. }
  39864. #else
  39865. __ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
  39866. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  39867. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  39868. float16x4_t __ret;
  39869. __ret = __rev0 * __rev1;
  39870. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  39871. return __ret;
  39872. }
  39873. #endif
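/* Multiply-by-lane variants: _lane selects a lane from a 64-bit vector, _laneq
 * from a 128-bit vector, and _n takes a scalar.  The selected value is splatted
 * across all lanes (via __builtin_shufflevector or a vector literal) and then
 * multiplied lane-wise, e.g. vmul_lane_f16(a, b, 1) multiplies every element of
 * a by b[1]. */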
  39874. #ifdef __LITTLE_ENDIAN__
  39875. #define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  39876. float16x8_t __s0 = __p0; \
  39877. float16x4_t __s1 = __p1; \
  39878. float16x8_t __ret; \
  39879. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  39880. __ret; \
  39881. })
  39882. #else
  39883. #define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  39884. float16x8_t __s0 = __p0; \
  39885. float16x4_t __s1 = __p1; \
  39886. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39887. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  39888. float16x8_t __ret; \
  39889. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  39890. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  39891. __ret; \
  39892. })
  39893. #endif
  39894. #ifdef __LITTLE_ENDIAN__
  39895. #define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  39896. float16x4_t __s0 = __p0; \
  39897. float16x4_t __s1 = __p1; \
  39898. float16x4_t __ret; \
  39899. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  39900. __ret; \
  39901. })
  39902. #else
  39903. #define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  39904. float16x4_t __s0 = __p0; \
  39905. float16x4_t __s1 = __p1; \
  39906. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39907. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  39908. float16x4_t __ret; \
  39909. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  39910. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  39911. __ret; \
  39912. })
  39913. #endif
  39914. #ifdef __LITTLE_ENDIAN__
  39915. #define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
  39916. float16x8_t __s0 = __p0; \
  39917. float16x8_t __s1 = __p1; \
  39918. float16x8_t __ret; \
  39919. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  39920. __ret; \
  39921. })
  39922. #else
  39923. #define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
  39924. float16x8_t __s0 = __p0; \
  39925. float16x8_t __s1 = __p1; \
  39926. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39927. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  39928. float16x8_t __ret; \
  39929. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  39930. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  39931. __ret; \
  39932. })
  39933. #endif
  39934. #ifdef __LITTLE_ENDIAN__
  39935. #define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
  39936. float16x4_t __s0 = __p0; \
  39937. float16x8_t __s1 = __p1; \
  39938. float16x4_t __ret; \
  39939. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  39940. __ret; \
  39941. })
  39942. #else
  39943. #define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
  39944. float16x4_t __s0 = __p0; \
  39945. float16x8_t __s1 = __p1; \
  39946. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39947. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  39948. float16x4_t __ret; \
  39949. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  39950. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  39951. __ret; \
  39952. })
  39953. #endif
  39954. #ifdef __LITTLE_ENDIAN__
  39955. #define vmulq_n_f16(__p0, __p1) __extension__ ({ \
  39956. float16x8_t __s0 = __p0; \
  39957. float16_t __s1 = __p1; \
  39958. float16x8_t __ret; \
  39959. __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
  39960. __ret; \
  39961. })
  39962. #else
  39963. #define vmulq_n_f16(__p0, __p1) __extension__ ({ \
  39964. float16x8_t __s0 = __p0; \
  39965. float16_t __s1 = __p1; \
  39966. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  39967. float16x8_t __ret; \
  39968. __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
  39969. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  39970. __ret; \
  39971. })
  39972. #endif
  39973. #ifdef __LITTLE_ENDIAN__
  39974. #define vmul_n_f16(__p0, __p1) __extension__ ({ \
  39975. float16x4_t __s0 = __p0; \
  39976. float16_t __s1 = __p1; \
  39977. float16x4_t __ret; \
  39978. __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
  39979. __ret; \
  39980. })
  39981. #else
  39982. #define vmul_n_f16(__p0, __p1) __extension__ ({ \
  39983. float16x4_t __s0 = __p0; \
  39984. float16_t __s1 = __p1; \
  39985. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  39986. float16x4_t __ret; \
  39987. __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
  39988. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  39989. __ret; \
  39990. })
  39991. #endif
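/* vmulx_f16 / vmulxq_f16 map to the FMULX "multiply extended" operation: it
 * behaves like an ordinary multiply except that (+/-)0 * (+/-)infinity returns
 * (+/-)2.0 instead of NaN, which is what reciprocal-based division expects.
 * The __noswap_ helpers skip the endianness lane reversal so the lane-splat
 * macros below can call them on operands that are already reversed. */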
  39992. #ifdef __LITTLE_ENDIAN__
  39993. __ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
  39994. float16x8_t __ret;
  39995. __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  39996. return __ret;
  39997. }
  39998. #else
  39999. __ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
  40000. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40001. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40002. float16x8_t __ret;
  40003. __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40004. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40005. return __ret;
  40006. }
  40007. __ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
  40008. float16x8_t __ret;
  40009. __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  40010. return __ret;
  40011. }
  40012. #endif
  40013. #ifdef __LITTLE_ENDIAN__
  40014. __ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
  40015. float16x4_t __ret;
  40016. __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40017. return __ret;
  40018. }
  40019. #else
  40020. __ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
  40021. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40022. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40023. float16x4_t __ret;
  40024. __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40025. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40026. return __ret;
  40027. }
  40028. __ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
  40029. float16x4_t __ret;
  40030. __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40031. return __ret;
  40032. }
  40033. #endif
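/* vmulxq_lane / vmulx_lane / _laneq / _n: same splat-then-multiply pattern as
 * the vmul lane variants above, but routed through vmulx semantics via the
 * vmulx(q)_f16 and __noswap_vmulx(q)_f16 helpers. */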
  40034. #ifdef __LITTLE_ENDIAN__
  40035. #define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  40036. float16x8_t __s0 = __p0; \
  40037. float16x4_t __s1 = __p1; \
  40038. float16x8_t __ret; \
  40039. __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  40040. __ret; \
  40041. })
  40042. #else
  40043. #define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  40044. float16x8_t __s0 = __p0; \
  40045. float16x4_t __s1 = __p1; \
  40046. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  40047. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  40048. float16x8_t __ret; \
  40049. __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  40050. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  40051. __ret; \
  40052. })
  40053. #endif
  40054. #ifdef __LITTLE_ENDIAN__
  40055. #define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  40056. float16x4_t __s0 = __p0; \
  40057. float16x4_t __s1 = __p1; \
  40058. float16x4_t __ret; \
  40059. __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  40060. __ret; \
  40061. })
  40062. #else
  40063. #define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
  40064. float16x4_t __s0 = __p0; \
  40065. float16x4_t __s1 = __p1; \
  40066. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  40067. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  40068. float16x4_t __ret; \
  40069. __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  40070. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  40071. __ret; \
  40072. })
  40073. #endif
  40074. #ifdef __LITTLE_ENDIAN__
  40075. #define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
  40076. float16x8_t __s0 = __p0; \
  40077. float16x8_t __s1 = __p1; \
  40078. float16x8_t __ret; \
  40079. __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  40080. __ret; \
  40081. })
  40082. #else
  40083. #define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
  40084. float16x8_t __s0 = __p0; \
  40085. float16x8_t __s1 = __p1; \
  40086. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  40087. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  40088. float16x8_t __ret; \
  40089. __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  40090. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  40091. __ret; \
  40092. })
  40093. #endif
  40094. #ifdef __LITTLE_ENDIAN__
  40095. #define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
  40096. float16x4_t __s0 = __p0; \
  40097. float16x8_t __s1 = __p1; \
  40098. float16x4_t __ret; \
  40099. __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  40100. __ret; \
  40101. })
  40102. #else
  40103. #define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
  40104. float16x4_t __s0 = __p0; \
  40105. float16x8_t __s1 = __p1; \
  40106. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  40107. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  40108. float16x4_t __ret; \
  40109. __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  40110. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  40111. __ret; \
  40112. })
  40113. #endif
  40114. #ifdef __LITTLE_ENDIAN__
  40115. #define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
  40116. float16x8_t __s0 = __p0; \
  40117. float16_t __s1 = __p1; \
  40118. float16x8_t __ret; \
  40119. __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
  40120. __ret; \
  40121. })
  40122. #else
  40123. #define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
  40124. float16x8_t __s0 = __p0; \
  40125. float16_t __s1 = __p1; \
  40126. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  40127. float16x8_t __ret; \
  40128. __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
  40129. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  40130. __ret; \
  40131. })
  40132. #endif
  40133. #ifdef __LITTLE_ENDIAN__
  40134. #define vmulx_n_f16(__p0, __p1) __extension__ ({ \
  40135. float16x4_t __s0 = __p0; \
  40136. float16_t __s1 = __p1; \
  40137. float16x4_t __ret; \
  40138. __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
  40139. __ret; \
  40140. })
  40141. #else
  40142. #define vmulx_n_f16(__p0, __p1) __extension__ ({ \
  40143. float16x4_t __s0 = __p0; \
  40144. float16_t __s1 = __p1; \
  40145. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  40146. float16x4_t __ret; \
  40147. __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
  40148. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  40149. __ret; \
  40150. })
  40151. #endif
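/* vneg_f16 / vnegq_f16: lane-wise negation using the vector unary minus. */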
  40152. #ifdef __LITTLE_ENDIAN__
  40153. __ai float16x8_t vnegq_f16(float16x8_t __p0) {
  40154. float16x8_t __ret;
  40155. __ret = -__p0;
  40156. return __ret;
  40157. }
  40158. #else
  40159. __ai float16x8_t vnegq_f16(float16x8_t __p0) {
  40160. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40161. float16x8_t __ret;
  40162. __ret = -__rev0;
  40163. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40164. return __ret;
  40165. }
  40166. #endif
  40167. #ifdef __LITTLE_ENDIAN__
  40168. __ai float16x4_t vneg_f16(float16x4_t __p0) {
  40169. float16x4_t __ret;
  40170. __ret = -__p0;
  40171. return __ret;
  40172. }
  40173. #else
  40174. __ai float16x4_t vneg_f16(float16x4_t __p0) {
  40175. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40176. float16x4_t __ret;
  40177. __ret = -__rev0;
  40178. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40179. return __ret;
  40180. }
  40181. #endif
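/* Pairwise operations: vpadd, vpmax, vpmin (and the "nm" minNum/maxNum forms)
 * operate on adjacent pairs of the two concatenated inputs, e.g.
 *   vpadd_f16({a0,a1,a2,a3}, {b0,b1,b2,b3}) == {a0+a1, a2+a3, b0+b1, b2+b3}. */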
  40182. #ifdef __LITTLE_ENDIAN__
  40183. __ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
  40184. float16x8_t __ret;
  40185. __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  40186. return __ret;
  40187. }
  40188. #else
  40189. __ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
  40190. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40191. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40192. float16x8_t __ret;
  40193. __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40194. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40195. return __ret;
  40196. }
  40197. #endif
  40198. #ifdef __LITTLE_ENDIAN__
  40199. __ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
  40200. float16x4_t __ret;
  40201. __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40202. return __ret;
  40203. }
  40204. #else
  40205. __ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
  40206. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40207. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40208. float16x4_t __ret;
  40209. __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40210. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40211. return __ret;
  40212. }
  40213. #endif
  40214. #ifdef __LITTLE_ENDIAN__
  40215. __ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
  40216. float16x8_t __ret;
  40217. __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  40218. return __ret;
  40219. }
  40220. #else
  40221. __ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
  40222. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40223. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40224. float16x8_t __ret;
  40225. __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40226. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40227. return __ret;
  40228. }
  40229. #endif
  40230. #ifdef __LITTLE_ENDIAN__
  40231. __ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
  40232. float16x4_t __ret;
  40233. __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40234. return __ret;
  40235. }
  40236. #else
  40237. __ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
  40238. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40239. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40240. float16x4_t __ret;
  40241. __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40242. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40243. return __ret;
  40244. }
  40245. #endif
  40246. #ifdef __LITTLE_ENDIAN__
  40247. __ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  40248. float16x8_t __ret;
  40249. __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  40250. return __ret;
  40251. }
  40252. #else
  40253. __ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  40254. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40255. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40256. float16x8_t __ret;
  40257. __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40258. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40259. return __ret;
  40260. }
  40261. #endif
  40262. #ifdef __LITTLE_ENDIAN__
  40263. __ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
  40264. float16x4_t __ret;
  40265. __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40266. return __ret;
  40267. }
  40268. #else
  40269. __ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
  40270. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40271. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40272. float16x4_t __ret;
  40273. __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40274. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40275. return __ret;
  40276. }
  40277. #endif
  40278. #ifdef __LITTLE_ENDIAN__
  40279. __ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
  40280. float16x8_t __ret;
  40281. __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  40282. return __ret;
  40283. }
  40284. #else
  40285. __ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
  40286. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40287. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40288. float16x8_t __ret;
  40289. __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40290. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40291. return __ret;
  40292. }
  40293. #endif
  40294. #ifdef __LITTLE_ENDIAN__
  40295. __ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
  40296. float16x4_t __ret;
  40297. __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40298. return __ret;
  40299. }
  40300. #else
  40301. __ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
  40302. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40303. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40304. float16x4_t __ret;
  40305. __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40306. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40307. return __ret;
  40308. }
  40309. #endif
  40310. #ifdef __LITTLE_ENDIAN__
  40311. __ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  40312. float16x8_t __ret;
  40313. __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  40314. return __ret;
  40315. }
  40316. #else
  40317. __ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
  40318. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40319. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40320. float16x8_t __ret;
  40321. __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40322. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40323. return __ret;
  40324. }
  40325. #endif
  40326. #ifdef __LITTLE_ENDIAN__
  40327. __ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
  40328. float16x4_t __ret;
  40329. __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40330. return __ret;
  40331. }
  40332. #else
  40333. __ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
  40334. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40335. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40336. float16x4_t __ret;
  40337. __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40338. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40339. return __ret;
  40340. }
  40341. #endif
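/* vrecpe (reciprocal estimate, FRECPE) and vrecps (reciprocal step, FRECPS,
 * computing 2 - a*b) are intended to be combined for Newton-Raphson refinement
 * of 1/d.  Illustrative sketch only (not part of this header), using intrinsics
 * defined elsewhere in this file:
 *
 *   float16x4_t r = vrecpe_f16(d);
 *   r = vmul_f16(r, vrecps_f16(d, r));   // first refinement step
 *   r = vmul_f16(r, vrecps_f16(d, r));   // second step for extra accuracy
 */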
  40342. #ifdef __LITTLE_ENDIAN__
  40343. __ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
  40344. float16x8_t __ret;
  40345. __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
  40346. return __ret;
  40347. }
  40348. #else
  40349. __ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
  40350. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40351. float16x8_t __ret;
  40352. __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
  40353. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40354. return __ret;
  40355. }
  40356. #endif
  40357. #ifdef __LITTLE_ENDIAN__
  40358. __ai float16x4_t vrecpe_f16(float16x4_t __p0) {
  40359. float16x4_t __ret;
  40360. __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
  40361. return __ret;
  40362. }
  40363. #else
  40364. __ai float16x4_t vrecpe_f16(float16x4_t __p0) {
  40365. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40366. float16x4_t __ret;
  40367. __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
  40368. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40369. return __ret;
  40370. }
  40371. #endif
  40372. #ifdef __LITTLE_ENDIAN__
  40373. __ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
  40374. float16x8_t __ret;
  40375. __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  40376. return __ret;
  40377. }
  40378. #else
  40379. __ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
  40380. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40381. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40382. float16x8_t __ret;
  40383. __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40384. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40385. return __ret;
  40386. }
  40387. #endif
  40388. #ifdef __LITTLE_ENDIAN__
  40389. __ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
  40390. float16x4_t __ret;
  40391. __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40392. return __ret;
  40393. }
  40394. #else
  40395. __ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
  40396. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40397. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40398. float16x4_t __ret;
  40399. __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40400. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40401. return __ret;
  40402. }
  40403. #endif
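/* vrev64 reverses the 16-bit lanes within each 64-bit doubleword:
 *   vrev64_f16({a0,a1,a2,a3})  == {a3,a2,a1,a0}
 *   vrev64q_f16({a0,...,a7})   == {a3,a2,a1,a0, a7,a6,a5,a4} */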
  40404. #ifdef __LITTLE_ENDIAN__
  40405. __ai float16x8_t vrev64q_f16(float16x8_t __p0) {
  40406. float16x8_t __ret;
  40407. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
  40408. return __ret;
  40409. }
  40410. #else
  40411. __ai float16x8_t vrev64q_f16(float16x8_t __p0) {
  40412. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40413. float16x8_t __ret;
  40414. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
  40415. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40416. return __ret;
  40417. }
  40418. #endif
  40419. #ifdef __LITTLE_ENDIAN__
  40420. __ai float16x4_t vrev64_f16(float16x4_t __p0) {
  40421. float16x4_t __ret;
  40422. __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40423. return __ret;
  40424. }
  40425. #else
  40426. __ai float16x4_t vrev64_f16(float16x4_t __p0) {
  40427. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40428. float16x4_t __ret;
  40429. __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
  40430. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40431. return __ret;
  40432. }
  40433. #endif
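/* Round-to-integral family (lane-wise, results remain float16; these roughly
 * correspond to the FRINT* instructions):
 *   vrnd  - toward zero (truncate)      vrnda - to nearest, ties away from 0
 *   vrndi - current rounding mode       vrndm - toward -infinity (floor)
 *   vrndn - to nearest, ties to even    vrndp - toward +infinity (ceil)
 *   vrndx - current mode, raising Inexact when the value changes */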
  40434. #ifdef __LITTLE_ENDIAN__
  40435. __ai float16x8_t vrndq_f16(float16x8_t __p0) {
  40436. float16x8_t __ret;
  40437. __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40);
  40438. return __ret;
  40439. }
  40440. #else
  40441. __ai float16x8_t vrndq_f16(float16x8_t __p0) {
  40442. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40443. float16x8_t __ret;
  40444. __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
  40445. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40446. return __ret;
  40447. }
  40448. #endif
  40449. #ifdef __LITTLE_ENDIAN__
  40450. __ai float16x4_t vrnd_f16(float16x4_t __p0) {
  40451. float16x4_t __ret;
  40452. __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8);
  40453. return __ret;
  40454. }
  40455. #else
  40456. __ai float16x4_t vrnd_f16(float16x4_t __p0) {
  40457. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40458. float16x4_t __ret;
  40459. __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
  40460. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40461. return __ret;
  40462. }
  40463. #endif
  40464. #ifdef __LITTLE_ENDIAN__
  40465. __ai float16x8_t vrndaq_f16(float16x8_t __p0) {
  40466. float16x8_t __ret;
  40467. __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40);
  40468. return __ret;
  40469. }
  40470. #else
  40471. __ai float16x8_t vrndaq_f16(float16x8_t __p0) {
  40472. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40473. float16x8_t __ret;
  40474. __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
  40475. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40476. return __ret;
  40477. }
  40478. #endif
  40479. #ifdef __LITTLE_ENDIAN__
  40480. __ai float16x4_t vrnda_f16(float16x4_t __p0) {
  40481. float16x4_t __ret;
  40482. __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8);
  40483. return __ret;
  40484. }
  40485. #else
  40486. __ai float16x4_t vrnda_f16(float16x4_t __p0) {
  40487. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40488. float16x4_t __ret;
  40489. __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
  40490. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40491. return __ret;
  40492. }
  40493. #endif
  40494. #ifdef __LITTLE_ENDIAN__
  40495. __ai float16x8_t vrndiq_f16(float16x8_t __p0) {
  40496. float16x8_t __ret;
  40497. __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
  40498. return __ret;
  40499. }
  40500. #else
  40501. __ai float16x8_t vrndiq_f16(float16x8_t __p0) {
  40502. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40503. float16x8_t __ret;
  40504. __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
  40505. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40506. return __ret;
  40507. }
  40508. #endif
  40509. #ifdef __LITTLE_ENDIAN__
  40510. __ai float16x4_t vrndi_f16(float16x4_t __p0) {
  40511. float16x4_t __ret;
  40512. __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
  40513. return __ret;
  40514. }
  40515. #else
  40516. __ai float16x4_t vrndi_f16(float16x4_t __p0) {
  40517. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40518. float16x4_t __ret;
  40519. __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
  40520. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40521. return __ret;
  40522. }
  40523. #endif
  40524. #ifdef __LITTLE_ENDIAN__
  40525. __ai float16x8_t vrndmq_f16(float16x8_t __p0) {
  40526. float16x8_t __ret;
  40527. __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40);
  40528. return __ret;
  40529. }
  40530. #else
  40531. __ai float16x8_t vrndmq_f16(float16x8_t __p0) {
  40532. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40533. float16x8_t __ret;
  40534. __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
  40535. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40536. return __ret;
  40537. }
  40538. #endif
  40539. #ifdef __LITTLE_ENDIAN__
  40540. __ai float16x4_t vrndm_f16(float16x4_t __p0) {
  40541. float16x4_t __ret;
  40542. __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8);
  40543. return __ret;
  40544. }
  40545. #else
  40546. __ai float16x4_t vrndm_f16(float16x4_t __p0) {
  40547. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40548. float16x4_t __ret;
  40549. __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
  40550. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40551. return __ret;
  40552. }
  40553. #endif
  40554. #ifdef __LITTLE_ENDIAN__
  40555. __ai float16x8_t vrndnq_f16(float16x8_t __p0) {
  40556. float16x8_t __ret;
  40557. __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40);
  40558. return __ret;
  40559. }
  40560. #else
  40561. __ai float16x8_t vrndnq_f16(float16x8_t __p0) {
  40562. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40563. float16x8_t __ret;
  40564. __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
  40565. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40566. return __ret;
  40567. }
  40568. #endif
  40569. #ifdef __LITTLE_ENDIAN__
  40570. __ai float16x4_t vrndn_f16(float16x4_t __p0) {
  40571. float16x4_t __ret;
  40572. __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8);
  40573. return __ret;
  40574. }
  40575. #else
  40576. __ai float16x4_t vrndn_f16(float16x4_t __p0) {
  40577. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40578. float16x4_t __ret;
  40579. __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
  40580. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40581. return __ret;
  40582. }
  40583. #endif
  40584. #ifdef __LITTLE_ENDIAN__
  40585. __ai float16x8_t vrndpq_f16(float16x8_t __p0) {
  40586. float16x8_t __ret;
  40587. __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40);
  40588. return __ret;
  40589. }
  40590. #else
  40591. __ai float16x8_t vrndpq_f16(float16x8_t __p0) {
  40592. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40593. float16x8_t __ret;
  40594. __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
  40595. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40596. return __ret;
  40597. }
  40598. #endif
  40599. #ifdef __LITTLE_ENDIAN__
  40600. __ai float16x4_t vrndp_f16(float16x4_t __p0) {
  40601. float16x4_t __ret;
  40602. __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8);
  40603. return __ret;
  40604. }
  40605. #else
  40606. __ai float16x4_t vrndp_f16(float16x4_t __p0) {
  40607. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40608. float16x4_t __ret;
  40609. __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
  40610. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40611. return __ret;
  40612. }
  40613. #endif
  40614. #ifdef __LITTLE_ENDIAN__
  40615. __ai float16x8_t vrndxq_f16(float16x8_t __p0) {
  40616. float16x8_t __ret;
  40617. __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40);
  40618. return __ret;
  40619. }
  40620. #else
  40621. __ai float16x8_t vrndxq_f16(float16x8_t __p0) {
  40622. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40623. float16x8_t __ret;
  40624. __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
  40625. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40626. return __ret;
  40627. }
  40628. #endif
  40629. #ifdef __LITTLE_ENDIAN__
  40630. __ai float16x4_t vrndx_f16(float16x4_t __p0) {
  40631. float16x4_t __ret;
  40632. __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8);
  40633. return __ret;
  40634. }
  40635. #else
  40636. __ai float16x4_t vrndx_f16(float16x4_t __p0) {
  40637. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40638. float16x4_t __ret;
  40639. __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
  40640. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40641. return __ret;
  40642. }
  40643. #endif
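/* vrsqrte (reciprocal square-root estimate, FRSQRTE) and vrsqrts (step, FRSQRTS,
 * computing (3 - a*b) / 2) combine into Newton-Raphson refinement of 1/sqrt(d).
 * Illustrative sketch only (not part of this header):
 *
 *   float16x4_t x = vrsqrte_f16(d);
 *   x = vmul_f16(x, vrsqrts_f16(vmul_f16(d, x), x));   // one refinement step
 */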
  40644. #ifdef __LITTLE_ENDIAN__
  40645. __ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
  40646. float16x8_t __ret;
  40647. __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
  40648. return __ret;
  40649. }
  40650. #else
  40651. __ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
  40652. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40653. float16x8_t __ret;
  40654. __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
  40655. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40656. return __ret;
  40657. }
  40658. #endif
  40659. #ifdef __LITTLE_ENDIAN__
  40660. __ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
  40661. float16x4_t __ret;
  40662. __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
  40663. return __ret;
  40664. }
  40665. #else
  40666. __ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
  40667. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40668. float16x4_t __ret;
  40669. __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
  40670. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40671. return __ret;
  40672. }
  40673. #endif
  40674. #ifdef __LITTLE_ENDIAN__
  40675. __ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
  40676. float16x8_t __ret;
  40677. __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
  40678. return __ret;
  40679. }
  40680. #else
  40681. __ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
  40682. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40683. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40684. float16x8_t __ret;
  40685. __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40686. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40687. return __ret;
  40688. }
  40689. #endif
  40690. #ifdef __LITTLE_ENDIAN__
  40691. __ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
  40692. float16x4_t __ret;
  40693. __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
  40694. return __ret;
  40695. }
  40696. #else
  40697. __ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
  40698. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40699. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40700. float16x4_t __ret;
  40701. __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40702. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40703. return __ret;
  40704. }
  40705. #endif
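/* vsqrt_f16 / vsqrtq_f16: lane-wise square root; vsub below is the matching
 * lane-wise subtraction using the vector - operator. */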
  40706. #ifdef __LITTLE_ENDIAN__
  40707. __ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
  40708. float16x8_t __ret;
  40709. __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
  40710. return __ret;
  40711. }
  40712. #else
  40713. __ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
  40714. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40715. float16x8_t __ret;
  40716. __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
  40717. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40718. return __ret;
  40719. }
  40720. #endif
  40721. #ifdef __LITTLE_ENDIAN__
  40722. __ai float16x4_t vsqrt_f16(float16x4_t __p0) {
  40723. float16x4_t __ret;
  40724. __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
  40725. return __ret;
  40726. }
  40727. #else
  40728. __ai float16x4_t vsqrt_f16(float16x4_t __p0) {
  40729. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40730. float16x4_t __ret;
  40731. __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
  40732. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40733. return __ret;
  40734. }
  40735. #endif
  40736. #ifdef __LITTLE_ENDIAN__
  40737. __ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
  40738. float16x8_t __ret;
  40739. __ret = __p0 - __p1;
  40740. return __ret;
  40741. }
  40742. #else
  40743. __ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
  40744. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40745. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40746. float16x8_t __ret;
  40747. __ret = __rev0 - __rev1;
  40748. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40749. return __ret;
  40750. }
  40751. #endif
  40752. #ifdef __LITTLE_ENDIAN__
  40753. __ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
  40754. float16x4_t __ret;
  40755. __ret = __p0 - __p1;
  40756. return __ret;
  40757. }
  40758. #else
  40759. __ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
  40760. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40761. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40762. float16x4_t __ret;
  40763. __ret = __rev0 - __rev1;
  40764. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40765. return __ret;
  40766. }
  40767. #endif
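/* Permutation helpers.  The two-result forms (vtrn, vuzp, vzip) fill a
 * float16x4x2_t / float16x8x2_t through the corresponding builtin, while the
 * AArch64 single-result forms are plain shuffles, e.g. for 4-lane inputs
 * a = {a0,a1,a2,a3}, b = {b0,b1,b2,b3}:
 *   vtrn1_f16(a,b) == {a0,b0,a2,b2}   vtrn2_f16(a,b) == {a1,b1,a3,b3}
 *   vuzp1_f16(a,b) == {a0,a2,b0,b2}   vuzp2_f16(a,b) == {a1,a3,b1,b3}
 *   vzip1_f16(a,b) == {a0,b0,a1,b1}   vzip2_f16(a,b) == {a2,b2,a3,b3} */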
  40768. #ifdef __LITTLE_ENDIAN__
  40769. __ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
  40770. float16x8x2_t __ret;
  40771. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
  40772. return __ret;
  40773. }
  40774. #else
  40775. __ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
  40776. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40777. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40778. float16x8x2_t __ret;
  40779. __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40780. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  40781. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  40782. return __ret;
  40783. }
  40784. #endif
  40785. #ifdef __LITTLE_ENDIAN__
  40786. __ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
  40787. float16x4x2_t __ret;
  40788. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
  40789. return __ret;
  40790. }
  40791. #else
  40792. __ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
  40793. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40794. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40795. float16x4x2_t __ret;
  40796. __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40797. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  40798. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  40799. return __ret;
  40800. }
  40801. #endif
  40802. #ifdef __LITTLE_ENDIAN__
  40803. __ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
  40804. float16x8_t __ret;
  40805. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
  40806. return __ret;
  40807. }
  40808. #else
  40809. __ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
  40810. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40811. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40812. float16x8_t __ret;
  40813. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
  40814. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40815. return __ret;
  40816. }
  40817. #endif
  40818. #ifdef __LITTLE_ENDIAN__
  40819. __ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
  40820. float16x4_t __ret;
  40821. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
  40822. return __ret;
  40823. }
  40824. #else
  40825. __ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
  40826. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40827. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40828. float16x4_t __ret;
  40829. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
  40830. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40831. return __ret;
  40832. }
  40833. #endif
  40834. #ifdef __LITTLE_ENDIAN__
  40835. __ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
  40836. float16x8_t __ret;
  40837. __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
  40838. return __ret;
  40839. }
  40840. #else
  40841. __ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
  40842. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40843. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40844. float16x8_t __ret;
  40845. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
  40846. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40847. return __ret;
  40848. }
  40849. #endif
  40850. #ifdef __LITTLE_ENDIAN__
  40851. __ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
  40852. float16x4_t __ret;
  40853. __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
  40854. return __ret;
  40855. }
  40856. #else
  40857. __ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
  40858. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40859. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40860. float16x4_t __ret;
  40861. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
  40862. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40863. return __ret;
  40864. }
  40865. #endif
  40866. #ifdef __LITTLE_ENDIAN__
  40867. __ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
  40868. float16x8x2_t __ret;
  40869. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
  40870. return __ret;
  40871. }
  40872. #else
  40873. __ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
  40874. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40875. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40876. float16x8x2_t __ret;
  40877. __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40878. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  40879. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  40880. return __ret;
  40881. }
  40882. #endif
  40883. #ifdef __LITTLE_ENDIAN__
  40884. __ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
  40885. float16x4x2_t __ret;
  40886. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
  40887. return __ret;
  40888. }
  40889. #else
  40890. __ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
  40891. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40892. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40893. float16x4x2_t __ret;
  40894. __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40895. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  40896. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  40897. return __ret;
  40898. }
  40899. #endif
  40900. #ifdef __LITTLE_ENDIAN__
  40901. __ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
  40902. float16x8_t __ret;
  40903. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
  40904. return __ret;
  40905. }
  40906. #else
  40907. __ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
  40908. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40909. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40910. float16x8_t __ret;
  40911. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
  40912. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40913. return __ret;
  40914. }
  40915. #endif
  40916. #ifdef __LITTLE_ENDIAN__
  40917. __ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
  40918. float16x4_t __ret;
  40919. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
  40920. return __ret;
  40921. }
  40922. #else
  40923. __ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
  40924. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40925. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40926. float16x4_t __ret;
  40927. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
  40928. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40929. return __ret;
  40930. }
  40931. #endif
  40932. #ifdef __LITTLE_ENDIAN__
  40933. __ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
  40934. float16x8_t __ret;
  40935. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
  40936. return __ret;
  40937. }
  40938. #else
  40939. __ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
  40940. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40941. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40942. float16x8_t __ret;
  40943. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
  40944. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  40945. return __ret;
  40946. }
  40947. #endif
  40948. #ifdef __LITTLE_ENDIAN__
  40949. __ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
  40950. float16x4_t __ret;
  40951. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
  40952. return __ret;
  40953. }
  40954. #else
  40955. __ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
  40956. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40957. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40958. float16x4_t __ret;
  40959. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
  40960. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  40961. return __ret;
  40962. }
  40963. #endif
  40964. #ifdef __LITTLE_ENDIAN__
  40965. __ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
  40966. float16x8x2_t __ret;
  40967. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
  40968. return __ret;
  40969. }
  40970. #else
  40971. __ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
  40972. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  40973. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  40974. float16x8x2_t __ret;
  40975. __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
  40976. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
  40977. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
  40978. return __ret;
  40979. }
  40980. #endif
  40981. #ifdef __LITTLE_ENDIAN__
  40982. __ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
  40983. float16x4x2_t __ret;
  40984. __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
  40985. return __ret;
  40986. }
  40987. #else
  40988. __ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
  40989. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  40990. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  40991. float16x4x2_t __ret;
  40992. __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
  40993. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
  40994. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
  40995. return __ret;
  40996. }
  40997. #endif
  40998. #ifdef __LITTLE_ENDIAN__
  40999. __ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
  41000. float16x8_t __ret;
  41001. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  41002. return __ret;
  41003. }
  41004. #else
  41005. __ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
  41006. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41007. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  41008. float16x8_t __ret;
  41009. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  41010. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  41011. return __ret;
  41012. }
  41013. #endif
  41014. #ifdef __LITTLE_ENDIAN__
  41015. __ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
  41016. float16x4_t __ret;
  41017. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  41018. return __ret;
  41019. }
  41020. #else
  41021. __ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
  41022. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41023. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  41024. float16x4_t __ret;
  41025. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  41026. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  41027. return __ret;
  41028. }
  41029. #endif
  41030. #ifdef __LITTLE_ENDIAN__
  41031. __ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
  41032. float16x8_t __ret;
  41033. __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  41034. return __ret;
  41035. }
  41036. #else
  41037. __ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
  41038. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41039. float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  41040. float16x8_t __ret;
  41041. __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  41042. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  41043. return __ret;
  41044. }
  41045. #endif
  41046. #ifdef __LITTLE_ENDIAN__
  41047. __ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
  41048. float16x4_t __ret;
  41049. __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
  41050. return __ret;
  41051. }
  41052. #else
  41053. __ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
  41054. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41055. float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  41056. float16x4_t __ret;
  41057. __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
  41058. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  41059. return __ret;
  41060. }
  41061. #endif
  41062. #endif
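/* The block below is guarded by __ARM_FEATURE_QRDMX (the ARMv8.1-A rounding
 * doubling multiply-accumulate extension).  vqrdmlah is expressed here as a
 * saturating add (vqadd) of the rounding doubling multiply-high (vqrdmulh)
 * product, with the __noswap_ variants doing the work in the big-endian paths. */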
  41063. #if defined(__ARM_FEATURE_QRDMX)
  41064. #ifdef __LITTLE_ENDIAN__
  41065. __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  41066. int32x4_t __ret;
  41067. __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
  41068. return __ret;
  41069. }
  41070. #else
  41071. __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  41072. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41073. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  41074. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  41075. int32x4_t __ret;
  41076. __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
  41077. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  41078. return __ret;
  41079. }
  41080. #endif
  41081. #ifdef __LITTLE_ENDIAN__
  41082. __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  41083. int16x8_t __ret;
  41084. __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
  41085. return __ret;
  41086. }
  41087. #else
  41088. __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  41089. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41090. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  41091. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  41092. int16x8_t __ret;
  41093. __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
  41094. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  41095. return __ret;
  41096. }
  41097. #endif
  41098. #ifdef __LITTLE_ENDIAN__
  41099. __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  41100. int32x2_t __ret;
  41101. __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
  41102. return __ret;
  41103. }
  41104. #else
  41105. __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  41106. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41107. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  41108. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  41109. int32x2_t __ret;
  41110. __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
  41111. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  41112. return __ret;
  41113. }
  41114. #endif
  41115. #ifdef __LITTLE_ENDIAN__
  41116. __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  41117. int16x4_t __ret;
  41118. __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
  41119. return __ret;
  41120. }
  41121. #else
  41122. __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  41123. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41124. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  41125. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  41126. int16x4_t __ret;
  41127. __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
  41128. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  41129. return __ret;
  41130. }
  41131. #endif
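/*
 * vqrdmlah[q]_s16 / vqrdmlah[q]_s32 (ARMv8.1 "QRDMLAH" extension): compute the
 * saturating rounding doubling multiply-high of the second and third operands
 * and accumulate it into the first operand with a saturating add.  As the
 * definitions above show, this header composes the operation from vqadd and
 * vqrdmulh rather than calling a dedicated builtin.
 *
 * Minimal usage sketch for Q15 fixed-point data (names and values are
 * illustrative only):
 *
 *   int16x8_t acc = vdupq_n_s16(100);
 *   int16x8_t x   = vdupq_n_s16(16384);   // 0.5 in Q15
 *   int16x8_t y   = vdupq_n_s16(8192);    // 0.25 in Q15
 *   acc = vqrdmlahq_s16(acc, x, y);       // acc += x*y in Q15, rounded and saturated
 */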
  41132. #ifdef __LITTLE_ENDIAN__
  41133. #define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41134. int32x4_t __s0 = __p0; \
  41135. int32x4_t __s1 = __p1; \
  41136. int32x2_t __s2 = __p2; \
  41137. int32x4_t __ret; \
  41138. __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
  41139. __ret; \
  41140. })
  41141. #else
  41142. #define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41143. int32x4_t __s0 = __p0; \
  41144. int32x4_t __s1 = __p1; \
  41145. int32x2_t __s2 = __p2; \
  41146. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  41147. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  41148. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  41149. int32x4_t __ret; \
  41150. __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
  41151. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  41152. __ret; \
  41153. })
  41154. #endif
  41155. #ifdef __LITTLE_ENDIAN__
  41156. #define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41157. int16x8_t __s0 = __p0; \
  41158. int16x8_t __s1 = __p1; \
  41159. int16x4_t __s2 = __p2; \
  41160. int16x8_t __ret; \
  41161. __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
  41162. __ret; \
  41163. })
  41164. #else
  41165. #define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41166. int16x8_t __s0 = __p0; \
  41167. int16x8_t __s1 = __p1; \
  41168. int16x4_t __s2 = __p2; \
  41169. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  41170. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  41171. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  41172. int16x8_t __ret; \
  41173. __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
  41174. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  41175. __ret; \
  41176. })
  41177. #endif
  41178. #ifdef __LITTLE_ENDIAN__
  41179. #define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41180. int32x2_t __s0 = __p0; \
  41181. int32x2_t __s1 = __p1; \
  41182. int32x2_t __s2 = __p2; \
  41183. int32x2_t __ret; \
  41184. __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
  41185. __ret; \
  41186. })
  41187. #else
  41188. #define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41189. int32x2_t __s0 = __p0; \
  41190. int32x2_t __s1 = __p1; \
  41191. int32x2_t __s2 = __p2; \
  41192. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  41193. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  41194. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  41195. int32x2_t __ret; \
  41196. __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
  41197. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  41198. __ret; \
  41199. })
  41200. #endif
  41201. #ifdef __LITTLE_ENDIAN__
  41202. #define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41203. int16x4_t __s0 = __p0; \
  41204. int16x4_t __s1 = __p1; \
  41205. int16x4_t __s2 = __p2; \
  41206. int16x4_t __ret; \
  41207. __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
  41208. __ret; \
  41209. })
  41210. #else
  41211. #define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41212. int16x4_t __s0 = __p0; \
  41213. int16x4_t __s1 = __p1; \
  41214. int16x4_t __s2 = __p2; \
  41215. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  41216. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  41217. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  41218. int16x4_t __ret; \
  41219. __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
  41220. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  41221. __ret; \
  41222. })
  41223. #endif
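/*
 * The _lane_ forms above take a 64-bit vector as the third operand and a
 * constant lane index __p3; the selected lane is broadcast to every element
 * (the __builtin_shufflevector with repeated __p3 indices) before the
 * multiply-accumulate.  Because the lane index must be a compile-time
 * constant, these are macros rather than inline functions.
 *
 * Illustrative sketch (names are placeholders):
 *
 *   int32x4_t acc, x;      // Q31 data
 *   int32x2_t coeff;       // two coefficients
 *   acc = vqrdmlahq_lane_s32(acc, x, coeff, 1);   // use lane 1 of coeff
 */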
  41224. #ifdef __LITTLE_ENDIAN__
  41225. __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  41226. int32x4_t __ret;
  41227. __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
  41228. return __ret;
  41229. }
  41230. #else
  41231. __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  41232. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41233. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  41234. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  41235. int32x4_t __ret;
  41236. __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
  41237. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  41238. return __ret;
  41239. }
  41240. #endif
  41241. #ifdef __LITTLE_ENDIAN__
  41242. __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  41243. int16x8_t __ret;
  41244. __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
  41245. return __ret;
  41246. }
  41247. #else
  41248. __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  41249. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41250. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  41251. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  41252. int16x8_t __ret;
  41253. __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
  41254. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  41255. return __ret;
  41256. }
  41257. #endif
  41258. #ifdef __LITTLE_ENDIAN__
  41259. __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  41260. int32x2_t __ret;
  41261. __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
  41262. return __ret;
  41263. }
  41264. #else
  41265. __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  41266. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41267. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  41268. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  41269. int32x2_t __ret;
  41270. __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
  41271. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  41272. return __ret;
  41273. }
  41274. #endif
  41275. #ifdef __LITTLE_ENDIAN__
  41276. __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  41277. int16x4_t __ret;
  41278. __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
  41279. return __ret;
  41280. }
  41281. #else
  41282. __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  41283. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41284. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  41285. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  41286. int16x4_t __ret;
  41287. __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
  41288. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  41289. return __ret;
  41290. }
  41291. #endif
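/*
 * vqrdmlsh[q]_s16 / vqrdmlsh[q]_s32 are the subtracting counterparts of
 * vqrdmlah above: the saturating rounding doubling multiply-high of the last
 * two operands is subtracted from the accumulator with a saturating subtract
 * (vqsub + vqrdmulh in the definitions above).
 *
 * Illustrative sketch (names are placeholders):
 *
 *   int32x4_t acc, x, y;
 *   acc = vqrdmlshq_s32(acc, x, y);   // acc -= x*y in Q31, rounded and saturated
 */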
  41292. #ifdef __LITTLE_ENDIAN__
  41293. #define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41294. int32x4_t __s0 = __p0; \
  41295. int32x4_t __s1 = __p1; \
  41296. int32x2_t __s2 = __p2; \
  41297. int32x4_t __ret; \
  41298. __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
  41299. __ret; \
  41300. })
  41301. #else
  41302. #define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41303. int32x4_t __s0 = __p0; \
  41304. int32x4_t __s1 = __p1; \
  41305. int32x2_t __s2 = __p2; \
  41306. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  41307. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  41308. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  41309. int32x4_t __ret; \
  41310. __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
  41311. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  41312. __ret; \
  41313. })
  41314. #endif
  41315. #ifdef __LITTLE_ENDIAN__
  41316. #define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41317. int16x8_t __s0 = __p0; \
  41318. int16x8_t __s1 = __p1; \
  41319. int16x4_t __s2 = __p2; \
  41320. int16x8_t __ret; \
  41321. __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
  41322. __ret; \
  41323. })
  41324. #else
  41325. #define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41326. int16x8_t __s0 = __p0; \
  41327. int16x8_t __s1 = __p1; \
  41328. int16x4_t __s2 = __p2; \
  41329. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  41330. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  41331. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  41332. int16x8_t __ret; \
  41333. __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
  41334. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  41335. __ret; \
  41336. })
  41337. #endif
  41338. #ifdef __LITTLE_ENDIAN__
  41339. #define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41340. int32x2_t __s0 = __p0; \
  41341. int32x2_t __s1 = __p1; \
  41342. int32x2_t __s2 = __p2; \
  41343. int32x2_t __ret; \
  41344. __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
  41345. __ret; \
  41346. })
  41347. #else
  41348. #define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41349. int32x2_t __s0 = __p0; \
  41350. int32x2_t __s1 = __p1; \
  41351. int32x2_t __s2 = __p2; \
  41352. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  41353. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  41354. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  41355. int32x2_t __ret; \
  41356. __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
  41357. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  41358. __ret; \
  41359. })
  41360. #endif
  41361. #ifdef __LITTLE_ENDIAN__
  41362. #define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41363. int16x4_t __s0 = __p0; \
  41364. int16x4_t __s1 = __p1; \
  41365. int16x4_t __s2 = __p2; \
  41366. int16x4_t __ret; \
  41367. __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
  41368. __ret; \
  41369. })
  41370. #else
  41371. #define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41372. int16x4_t __s0 = __p0; \
  41373. int16x4_t __s1 = __p1; \
  41374. int16x4_t __s2 = __p2; \
  41375. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  41376. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  41377. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  41378. int16x4_t __ret; \
  41379. __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
  41380. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  41381. __ret; \
  41382. })
  41383. #endif
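/*
 * The vqrdmlsh*_lane_* macros above mirror the vqrdmlah*_lane_* macros: the
 * constant lane __p3 of the 64-bit third operand is broadcast before the
 * subtracting multiply-accumulate, e.g. (with illustrative names)
 *
 *   acc = vqrdmlsh_lane_s16(acc, x, coeff, 0);   // use lane 0 of coeff
 */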
  41384. #endif
  41385. #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
  41386. #ifdef __LITTLE_ENDIAN__
  41387. #define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41388. int32x4_t __s0 = __p0; \
  41389. int32x4_t __s1 = __p1; \
  41390. int32x4_t __s2 = __p2; \
  41391. int32x4_t __ret; \
  41392. __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
  41393. __ret; \
  41394. })
  41395. #else
  41396. #define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41397. int32x4_t __s0 = __p0; \
  41398. int32x4_t __s1 = __p1; \
  41399. int32x4_t __s2 = __p2; \
  41400. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  41401. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  41402. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  41403. int32x4_t __ret; \
  41404. __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
  41405. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  41406. __ret; \
  41407. })
  41408. #endif
  41409. #ifdef __LITTLE_ENDIAN__
  41410. #define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41411. int16x8_t __s0 = __p0; \
  41412. int16x8_t __s1 = __p1; \
  41413. int16x8_t __s2 = __p2; \
  41414. int16x8_t __ret; \
  41415. __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
  41416. __ret; \
  41417. })
  41418. #else
  41419. #define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41420. int16x8_t __s0 = __p0; \
  41421. int16x8_t __s1 = __p1; \
  41422. int16x8_t __s2 = __p2; \
  41423. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  41424. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  41425. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  41426. int16x8_t __ret; \
  41427. __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
  41428. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  41429. __ret; \
  41430. })
  41431. #endif
  41432. #ifdef __LITTLE_ENDIAN__
  41433. #define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41434. int32x2_t __s0 = __p0; \
  41435. int32x2_t __s1 = __p1; \
  41436. int32x4_t __s2 = __p2; \
  41437. int32x2_t __ret; \
  41438. __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
  41439. __ret; \
  41440. })
  41441. #else
  41442. #define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41443. int32x2_t __s0 = __p0; \
  41444. int32x2_t __s1 = __p1; \
  41445. int32x4_t __s2 = __p2; \
  41446. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  41447. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  41448. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  41449. int32x2_t __ret; \
  41450. __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
  41451. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  41452. __ret; \
  41453. })
  41454. #endif
  41455. #ifdef __LITTLE_ENDIAN__
  41456. #define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41457. int16x4_t __s0 = __p0; \
  41458. int16x4_t __s1 = __p1; \
  41459. int16x8_t __s2 = __p2; \
  41460. int16x4_t __ret; \
  41461. __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
  41462. __ret; \
  41463. })
  41464. #else
  41465. #define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41466. int16x4_t __s0 = __p0; \
  41467. int16x4_t __s1 = __p1; \
  41468. int16x8_t __s2 = __p2; \
  41469. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  41470. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  41471. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  41472. int16x4_t __ret; \
  41473. __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
  41474. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  41475. __ret; \
  41476. })
  41477. #endif
  41478. #ifdef __LITTLE_ENDIAN__
  41479. #define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41480. int32x4_t __s0 = __p0; \
  41481. int32x4_t __s1 = __p1; \
  41482. int32x4_t __s2 = __p2; \
  41483. int32x4_t __ret; \
  41484. __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
  41485. __ret; \
  41486. })
  41487. #else
  41488. #define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41489. int32x4_t __s0 = __p0; \
  41490. int32x4_t __s1 = __p1; \
  41491. int32x4_t __s2 = __p2; \
  41492. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  41493. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  41494. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  41495. int32x4_t __ret; \
  41496. __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
  41497. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  41498. __ret; \
  41499. })
  41500. #endif
  41501. #ifdef __LITTLE_ENDIAN__
  41502. #define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41503. int16x8_t __s0 = __p0; \
  41504. int16x8_t __s1 = __p1; \
  41505. int16x8_t __s2 = __p2; \
  41506. int16x8_t __ret; \
  41507. __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
  41508. __ret; \
  41509. })
  41510. #else
  41511. #define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41512. int16x8_t __s0 = __p0; \
  41513. int16x8_t __s1 = __p1; \
  41514. int16x8_t __s2 = __p2; \
  41515. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  41516. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  41517. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  41518. int16x8_t __ret; \
  41519. __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
  41520. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  41521. __ret; \
  41522. })
  41523. #endif
  41524. #ifdef __LITTLE_ENDIAN__
  41525. #define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41526. int32x2_t __s0 = __p0; \
  41527. int32x2_t __s1 = __p1; \
  41528. int32x4_t __s2 = __p2; \
  41529. int32x2_t __ret; \
  41530. __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
  41531. __ret; \
  41532. })
  41533. #else
  41534. #define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  41535. int32x2_t __s0 = __p0; \
  41536. int32x2_t __s1 = __p1; \
  41537. int32x4_t __s2 = __p2; \
  41538. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  41539. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  41540. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  41541. int32x2_t __ret; \
  41542. __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
  41543. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  41544. __ret; \
  41545. })
  41546. #endif
  41547. #ifdef __LITTLE_ENDIAN__
  41548. #define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41549. int16x4_t __s0 = __p0; \
  41550. int16x4_t __s1 = __p1; \
  41551. int16x8_t __s2 = __p2; \
  41552. int16x4_t __ret; \
  41553. __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
  41554. __ret; \
  41555. })
  41556. #else
  41557. #define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  41558. int16x4_t __s0 = __p0; \
  41559. int16x4_t __s1 = __p1; \
  41560. int16x8_t __s2 = __p2; \
  41561. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  41562. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  41563. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  41564. int16x4_t __ret; \
  41565. __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
  41566. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  41567. __ret; \
  41568. })
  41569. #endif
  41570. #endif
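/*
 * The *_laneq_* macros in the block above are the AArch64-only variants of the
 * lane forms: the lane is selected from a 128-bit third operand (int32x4_t or
 * int16x8_t), so the constant index __p3 may address any of its 4 or 8 lanes.
 * Otherwise the expansion is identical to the _lane_ forms.
 *
 * Illustrative sketch (names are placeholders):
 *
 *   int16x4_t acc, x;
 *   int16x8_t coeffs;                              // eight Q15 coefficients
 *   acc = vqrdmlah_laneq_s16(acc, x, coeffs, 7);   // use lane 7 of coeffs
 */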
  41571. #if defined(__aarch64__)
  41572. #ifdef __LITTLE_ENDIAN__
  41573. __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
  41574. float64x2_t __ret;
  41575. __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  41576. return __ret;
  41577. }
  41578. #else
  41579. __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
  41580. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41581. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  41582. float64x2_t __ret;
  41583. __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  41584. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  41585. return __ret;
  41586. }
  41587. #endif
  41588. #ifdef __LITTLE_ENDIAN__
  41589. __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
  41590. float64x1_t __ret;
  41591. __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  41592. return __ret;
  41593. }
  41594. #else
  41595. __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
  41596. float64x1_t __ret;
  41597. __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  41598. return __ret;
  41599. }
  41600. #endif
#ifdef __LITTLE_ENDIAN__
__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
float64_t __ret;
__ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
return __ret;
}
#else
__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
float64_t __ret;
__ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
float32_t __ret;
__ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
return __ret;
}
#else
__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
float32_t __ret;
__ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
return __ret;
}
#endif
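/*
 * vabdq_f64 / vabd_f64 compute the element-wise absolute difference
 * |__p0 - __p1| for double-precision vectors; vabdd_f64 and vabds_f32 are the
 * scalar double and single forms.  The scalar and single-element versions need
 * no lane reversal, so their big-endian branches simply repeat the
 * little-endian body.
 *
 * Illustrative sketch (names and values are placeholders):
 *
 *   float64x2_t a = vdupq_n_f64(1.5);
 *   float64x2_t b = vdupq_n_f64(4.0);
 *   float64x2_t d = vabdq_f64(a, b);      // {2.5, 2.5}
 *   float64_t   s = vabdd_f64(1.5, 4.0);  // 2.5
 */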
  41627. #ifdef __LITTLE_ENDIAN__
  41628. __ai float64x2_t vabsq_f64(float64x2_t __p0) {
  41629. float64x2_t __ret;
  41630. __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
  41631. return __ret;
  41632. }
  41633. #else
  41634. __ai float64x2_t vabsq_f64(float64x2_t __p0) {
  41635. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41636. float64x2_t __ret;
  41637. __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
  41638. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  41639. return __ret;
  41640. }
  41641. #endif
  41642. #ifdef __LITTLE_ENDIAN__
  41643. __ai int64x2_t vabsq_s64(int64x2_t __p0) {
  41644. int64x2_t __ret;
  41645. __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
  41646. return __ret;
  41647. }
  41648. #else
  41649. __ai int64x2_t vabsq_s64(int64x2_t __p0) {
  41650. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41651. int64x2_t __ret;
  41652. __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
  41653. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  41654. return __ret;
  41655. }
  41656. #endif
  41657. #ifdef __LITTLE_ENDIAN__
  41658. __ai float64x1_t vabs_f64(float64x1_t __p0) {
  41659. float64x1_t __ret;
  41660. __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
  41661. return __ret;
  41662. }
  41663. #else
  41664. __ai float64x1_t vabs_f64(float64x1_t __p0) {
  41665. float64x1_t __ret;
  41666. __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
  41667. return __ret;
  41668. }
  41669. #endif
  41670. #ifdef __LITTLE_ENDIAN__
  41671. __ai int64x1_t vabs_s64(int64x1_t __p0) {
  41672. int64x1_t __ret;
  41673. __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
  41674. return __ret;
  41675. }
  41676. #else
  41677. __ai int64x1_t vabs_s64(int64x1_t __p0) {
  41678. int64x1_t __ret;
  41679. __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
  41680. return __ret;
  41681. }
  41682. #endif
#ifdef __LITTLE_ENDIAN__
__ai int64_t vabsd_s64(int64_t __p0) {
int64_t __ret;
__ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
return __ret;
}
#else
__ai int64_t vabsd_s64(int64_t __p0) {
int64_t __ret;
__ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
return __ret;
}
#endif
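/*
 * vabsq_f64, vabsq_s64, vabs_f64, vabs_s64 and vabsd_s64 above provide
 * absolute value for the element types that exist only on AArch64 (float64
 * and 64-bit integers).  As with plain two's-complement negation, the integer
 * forms map INT64_MIN back to INT64_MIN; the saturating vqabs* intrinsics are
 * the ones that clamp to INT64_MAX instead.
 *
 * Illustrative sketch (names are placeholders):
 *
 *   int64x2_t v   = {-3, 7};
 *   int64x2_t mag = vabsq_s64(v);   // {3, 7}
 */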
  41696. #ifdef __LITTLE_ENDIAN__
  41697. __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
  41698. float64x2_t __ret;
  41699. __ret = __p0 + __p1;
  41700. return __ret;
  41701. }
  41702. #else
  41703. __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
  41704. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41705. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  41706. float64x2_t __ret;
  41707. __ret = __rev0 + __rev1;
  41708. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  41709. return __ret;
  41710. }
  41711. #endif
  41712. #ifdef __LITTLE_ENDIAN__
  41713. __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
  41714. float64x1_t __ret;
  41715. __ret = __p0 + __p1;
  41716. return __ret;
  41717. }
  41718. #else
  41719. __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
  41720. float64x1_t __ret;
  41721. __ret = __p0 + __p1;
  41722. return __ret;
  41723. }
  41724. #endif
  41725. #ifdef __LITTLE_ENDIAN__
  41726. __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
  41727. uint64_t __ret;
  41728. __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
  41729. return __ret;
  41730. }
  41731. #else
  41732. __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
  41733. uint64_t __ret;
  41734. __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
  41735. return __ret;
  41736. }
  41737. #endif
  41738. #ifdef __LITTLE_ENDIAN__
  41739. __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
  41740. int64_t __ret;
  41741. __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
  41742. return __ret;
  41743. }
  41744. #else
  41745. __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
  41746. int64_t __ret;
  41747. __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
  41748. return __ret;
  41749. }
  41750. #endif
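/*
 * vaddq_f64 / vadd_f64 are plain element-wise additions (expressed directly
 * with the + operator above), while vaddd_u64 and vaddd_s64 add two 64-bit
 * scalars through a builtin.  Functionally vaddd_s64(a, b) is just a + b; the
 * intrinsic exists so that scalar code written against the ACLE naming scheme
 * can map onto the scalar SIMD-register add.
 *
 * Illustrative one-liner (values are placeholders):
 *
 *   uint64_t sum = vaddd_u64(0x10ULL, 0x20ULL);   // 0x30
 */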
  41751. #ifdef __LITTLE_ENDIAN__
  41752. __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  41753. uint16x8_t __ret;
  41754. __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
  41755. return __ret;
  41756. }
  41757. #else
  41758. __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  41759. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41760. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  41761. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  41762. uint16x8_t __ret;
  41763. __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
  41764. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  41765. return __ret;
  41766. }
  41767. #endif
  41768. #ifdef __LITTLE_ENDIAN__
  41769. __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  41770. uint32x4_t __ret;
  41771. __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
  41772. return __ret;
  41773. }
  41774. #else
  41775. __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  41776. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41777. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  41778. uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  41779. uint32x4_t __ret;
  41780. __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
  41781. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  41782. return __ret;
  41783. }
  41784. #endif
  41785. #ifdef __LITTLE_ENDIAN__
  41786. __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  41787. uint8x16_t __ret;
  41788. __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
  41789. return __ret;
  41790. }
  41791. #else
  41792. __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  41793. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41794. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  41795. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  41796. uint8x16_t __ret;
  41797. __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
  41798. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  41799. return __ret;
  41800. }
  41801. #endif
  41802. #ifdef __LITTLE_ENDIAN__
  41803. __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  41804. int16x8_t __ret;
  41805. __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
  41806. return __ret;
  41807. }
  41808. #else
  41809. __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  41810. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41811. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  41812. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  41813. int16x8_t __ret;
  41814. __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
  41815. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  41816. return __ret;
  41817. }
  41818. #endif
  41819. #ifdef __LITTLE_ENDIAN__
  41820. __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  41821. int32x4_t __ret;
  41822. __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
  41823. return __ret;
  41824. }
  41825. #else
  41826. __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  41827. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41828. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  41829. int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  41830. int32x4_t __ret;
  41831. __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
  41832. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  41833. return __ret;
  41834. }
  41835. #endif
  41836. #ifdef __LITTLE_ENDIAN__
  41837. __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  41838. int8x16_t __ret;
  41839. __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
  41840. return __ret;
  41841. }
  41842. #else
  41843. __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  41844. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41845. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  41846. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  41847. int8x16_t __ret;
  41848. __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
  41849. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  41850. return __ret;
  41851. }
  41852. #endif
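/*
 * The vaddhn_high_* intrinsics above are the "high half" companions of
 * vaddhn_*: vaddhn(__p1, __p2) adds the two wide vectors and keeps the upper
 * half of each element of the sum (a narrowing add), and vcombine_* places
 * that narrowed result in the upper half of a 128-bit vector whose lower half
 * is __p0.  This lets a loop narrow two wide results into one full-width
 * vector without a separate combine step.
 *
 * Illustrative sketch (names are placeholders):
 *
 *   uint32x4_t a0, b0, a1, b1;                       // two pairs of wide inputs
 *   uint16x4_t lo   = vaddhn_u32(a0, b0);            // narrows the first pair
 *   uint16x8_t both = vaddhn_high_u32(lo, a1, b1);   // second pair fills the high half
 */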
  41853. #ifdef __LITTLE_ENDIAN__
  41854. __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
  41855. uint16_t __ret;
  41856. __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0);
  41857. return __ret;
  41858. }
  41859. #else
  41860. __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
  41861. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  41862. uint16_t __ret;
  41863. __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0);
  41864. return __ret;
  41865. }
  41866. #endif
  41867. #ifdef __LITTLE_ENDIAN__
  41868. __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
  41869. uint64_t __ret;
  41870. __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0);
  41871. return __ret;
  41872. }
  41873. #else
  41874. __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
  41875. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41876. uint64_t __ret;
  41877. __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0);
  41878. return __ret;
  41879. }
  41880. #endif
  41881. #ifdef __LITTLE_ENDIAN__
  41882. __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
  41883. uint32_t __ret;
  41884. __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0);
  41885. return __ret;
  41886. }
  41887. #else
  41888. __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
  41889. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41890. uint32_t __ret;
  41891. __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0);
  41892. return __ret;
  41893. }
  41894. #endif
  41895. #ifdef __LITTLE_ENDIAN__
  41896. __ai int16_t vaddlvq_s8(int8x16_t __p0) {
  41897. int16_t __ret;
  41898. __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0);
  41899. return __ret;
  41900. }
  41901. #else
  41902. __ai int16_t vaddlvq_s8(int8x16_t __p0) {
  41903. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  41904. int16_t __ret;
  41905. __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0);
  41906. return __ret;
  41907. }
  41908. #endif
  41909. #ifdef __LITTLE_ENDIAN__
  41910. __ai int64_t vaddlvq_s32(int32x4_t __p0) {
  41911. int64_t __ret;
  41912. __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0);
  41913. return __ret;
  41914. }
  41915. #else
  41916. __ai int64_t vaddlvq_s32(int32x4_t __p0) {
  41917. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41918. int64_t __ret;
  41919. __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0);
  41920. return __ret;
  41921. }
  41922. #endif
  41923. #ifdef __LITTLE_ENDIAN__
  41924. __ai int32_t vaddlvq_s16(int16x8_t __p0) {
  41925. int32_t __ret;
  41926. __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0);
  41927. return __ret;
  41928. }
  41929. #else
  41930. __ai int32_t vaddlvq_s16(int16x8_t __p0) {
  41931. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41932. int32_t __ret;
  41933. __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0);
  41934. return __ret;
  41935. }
  41936. #endif
  41937. #ifdef __LITTLE_ENDIAN__
  41938. __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
  41939. uint16_t __ret;
  41940. __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0);
  41941. return __ret;
  41942. }
  41943. #else
  41944. __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
  41945. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41946. uint16_t __ret;
  41947. __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0);
  41948. return __ret;
  41949. }
  41950. #endif
  41951. #ifdef __LITTLE_ENDIAN__
  41952. __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
  41953. uint64_t __ret;
  41954. __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0);
  41955. return __ret;
  41956. }
  41957. #else
  41958. __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
  41959. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  41960. uint64_t __ret;
  41961. __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0);
  41962. return __ret;
  41963. }
  41964. #endif
  41965. #ifdef __LITTLE_ENDIAN__
  41966. __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
  41967. uint32_t __ret;
  41968. __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0);
  41969. return __ret;
  41970. }
  41971. #else
  41972. __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
  41973. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  41974. uint32_t __ret;
  41975. __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0);
  41976. return __ret;
  41977. }
  41978. #endif
  41979. #ifdef __LITTLE_ENDIAN__
  41980. __ai int16_t vaddlv_s8(int8x8_t __p0) {
  41981. int16_t __ret;
  41982. __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0);
  41983. return __ret;
  41984. }
  41985. #else
  41986. __ai int16_t vaddlv_s8(int8x8_t __p0) {
  41987. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  41988. int16_t __ret;
  41989. __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0);
  41990. return __ret;
  41991. }
  41992. #endif
  41993. #ifdef __LITTLE_ENDIAN__
  41994. __ai int64_t vaddlv_s32(int32x2_t __p0) {
  41995. int64_t __ret;
  41996. __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0);
  41997. return __ret;
  41998. }
  41999. #else
  42000. __ai int64_t vaddlv_s32(int32x2_t __p0) {
  42001. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42002. int64_t __ret;
  42003. __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0);
  42004. return __ret;
  42005. }
  42006. #endif
  42007. #ifdef __LITTLE_ENDIAN__
  42008. __ai int32_t vaddlv_s16(int16x4_t __p0) {
  42009. int32_t __ret;
  42010. __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0);
  42011. return __ret;
  42012. }
  42013. #else
  42014. __ai int32_t vaddlv_s16(int16x4_t __p0) {
  42015. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42016. int32_t __ret;
  42017. __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0);
  42018. return __ret;
  42019. }
  42020. #endif
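/*
 * vaddlv[q]_* above are widening "add across vector" reductions: every lane is
 * summed and the result is returned as a scalar of twice the element width
 * (e.g. vaddlvq_u8 sums sixteen uint8 lanes into a uint16_t, vaddlv_s32 sums
 * two int32 lanes into an int64_t), so intermediate overflow of the element
 * type cannot occur.
 *
 * Illustrative sketch (names are placeholders):
 *
 *   uint8x16_t bytes = vdupq_n_u8(200);
 *   uint16_t   sum   = vaddlvq_u8(bytes);   // 16 * 200 = 3200, no overflow
 */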
  42021. #ifdef __LITTLE_ENDIAN__
  42022. __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
  42023. uint8_t __ret;
  42024. __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0);
  42025. return __ret;
  42026. }
  42027. #else
  42028. __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
  42029. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  42030. uint8_t __ret;
  42031. __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0);
  42032. return __ret;
  42033. }
  42034. #endif
  42035. #ifdef __LITTLE_ENDIAN__
  42036. __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
  42037. uint32_t __ret;
  42038. __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0);
  42039. return __ret;
  42040. }
  42041. #else
  42042. __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
  42043. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42044. uint32_t __ret;
  42045. __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0);
  42046. return __ret;
  42047. }
  42048. #endif
  42049. #ifdef __LITTLE_ENDIAN__
  42050. __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
  42051. uint64_t __ret;
  42052. __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0);
  42053. return __ret;
  42054. }
  42055. #else
  42056. __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
  42057. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42058. uint64_t __ret;
  42059. __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0);
  42060. return __ret;
  42061. }
  42062. #endif
  42063. #ifdef __LITTLE_ENDIAN__
  42064. __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
  42065. uint16_t __ret;
  42066. __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0);
  42067. return __ret;
  42068. }
  42069. #else
  42070. __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
  42071. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42072. uint16_t __ret;
  42073. __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0);
  42074. return __ret;
  42075. }
  42076. #endif
  42077. #ifdef __LITTLE_ENDIAN__
  42078. __ai int8_t vaddvq_s8(int8x16_t __p0) {
  42079. int8_t __ret;
  42080. __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0);
  42081. return __ret;
  42082. }
  42083. #else
  42084. __ai int8_t vaddvq_s8(int8x16_t __p0) {
  42085. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  42086. int8_t __ret;
  42087. __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0);
  42088. return __ret;
  42089. }
  42090. #endif
  42091. #ifdef __LITTLE_ENDIAN__
  42092. __ai float64_t vaddvq_f64(float64x2_t __p0) {
  42093. float64_t __ret;
  42094. __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0);
  42095. return __ret;
  42096. }
  42097. #else
  42098. __ai float64_t vaddvq_f64(float64x2_t __p0) {
  42099. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42100. float64_t __ret;
  42101. __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0);
  42102. return __ret;
  42103. }
  42104. #endif
  42105. #ifdef __LITTLE_ENDIAN__
  42106. __ai float32_t vaddvq_f32(float32x4_t __p0) {
  42107. float32_t __ret;
  42108. __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0);
  42109. return __ret;
  42110. }
  42111. #else
  42112. __ai float32_t vaddvq_f32(float32x4_t __p0) {
  42113. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42114. float32_t __ret;
  42115. __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0);
  42116. return __ret;
  42117. }
  42118. #endif
  42119. #ifdef __LITTLE_ENDIAN__
  42120. __ai int32_t vaddvq_s32(int32x4_t __p0) {
  42121. int32_t __ret;
  42122. __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0);
  42123. return __ret;
  42124. }
  42125. #else
  42126. __ai int32_t vaddvq_s32(int32x4_t __p0) {
  42127. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42128. int32_t __ret;
  42129. __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0);
  42130. return __ret;
  42131. }
  42132. #endif
  42133. #ifdef __LITTLE_ENDIAN__
  42134. __ai int64_t vaddvq_s64(int64x2_t __p0) {
  42135. int64_t __ret;
  42136. __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0);
  42137. return __ret;
  42138. }
  42139. #else
  42140. __ai int64_t vaddvq_s64(int64x2_t __p0) {
  42141. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42142. int64_t __ret;
  42143. __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0);
  42144. return __ret;
  42145. }
  42146. #endif
  42147. #ifdef __LITTLE_ENDIAN__
  42148. __ai int16_t vaddvq_s16(int16x8_t __p0) {
  42149. int16_t __ret;
  42150. __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0);
  42151. return __ret;
  42152. }
  42153. #else
  42154. __ai int16_t vaddvq_s16(int16x8_t __p0) {
  42155. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42156. int16_t __ret;
  42157. __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0);
  42158. return __ret;
  42159. }
  42160. #endif
  42161. #ifdef __LITTLE_ENDIAN__
  42162. __ai uint8_t vaddv_u8(uint8x8_t __p0) {
  42163. uint8_t __ret;
  42164. __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0);
  42165. return __ret;
  42166. }
  42167. #else
  42168. __ai uint8_t vaddv_u8(uint8x8_t __p0) {
  42169. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42170. uint8_t __ret;
  42171. __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0);
  42172. return __ret;
  42173. }
  42174. #endif
  42175. #ifdef __LITTLE_ENDIAN__
  42176. __ai uint32_t vaddv_u32(uint32x2_t __p0) {
  42177. uint32_t __ret;
  42178. __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0);
  42179. return __ret;
  42180. }
  42181. #else
  42182. __ai uint32_t vaddv_u32(uint32x2_t __p0) {
  42183. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42184. uint32_t __ret;
  42185. __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0);
  42186. return __ret;
  42187. }
  42188. #endif
  42189. #ifdef __LITTLE_ENDIAN__
  42190. __ai uint16_t vaddv_u16(uint16x4_t __p0) {
  42191. uint16_t __ret;
  42192. __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0);
  42193. return __ret;
  42194. }
  42195. #else
  42196. __ai uint16_t vaddv_u16(uint16x4_t __p0) {
  42197. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42198. uint16_t __ret;
  42199. __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0);
  42200. return __ret;
  42201. }
  42202. #endif
  42203. #ifdef __LITTLE_ENDIAN__
  42204. __ai int8_t vaddv_s8(int8x8_t __p0) {
  42205. int8_t __ret;
  42206. __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0);
  42207. return __ret;
  42208. }
  42209. #else
  42210. __ai int8_t vaddv_s8(int8x8_t __p0) {
  42211. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42212. int8_t __ret;
  42213. __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0);
  42214. return __ret;
  42215. }
  42216. #endif
  42217. #ifdef __LITTLE_ENDIAN__
  42218. __ai float32_t vaddv_f32(float32x2_t __p0) {
  42219. float32_t __ret;
  42220. __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0);
  42221. return __ret;
  42222. }
  42223. #else
  42224. __ai float32_t vaddv_f32(float32x2_t __p0) {
  42225. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42226. float32_t __ret;
  42227. __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0);
  42228. return __ret;
  42229. }
  42230. #endif
  42231. #ifdef __LITTLE_ENDIAN__
  42232. __ai int32_t vaddv_s32(int32x2_t __p0) {
  42233. int32_t __ret;
  42234. __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0);
  42235. return __ret;
  42236. }
  42237. #else
  42238. __ai int32_t vaddv_s32(int32x2_t __p0) {
  42239. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42240. int32_t __ret;
  42241. __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0);
  42242. return __ret;
  42243. }
  42244. #endif
  42245. #ifdef __LITTLE_ENDIAN__
  42246. __ai int16_t vaddv_s16(int16x4_t __p0) {
  42247. int16_t __ret;
  42248. __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0);
  42249. return __ret;
  42250. }
  42251. #else
  42252. __ai int16_t vaddv_s16(int16x4_t __p0) {
  42253. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42254. int16_t __ret;
  42255. __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0);
  42256. return __ret;
  42257. }
  42258. #endif
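/*
 * vaddv[q]_* above are the non-widening "add across vector" reductions: all
 * lanes are summed and the result is returned in the same element type, so
 * integer sums wrap and floating-point sums round exactly as ordinary
 * lane-wise additions would.  Use the vaddlv[q]_* forms above when the sum
 * may exceed the element range.
 *
 * Illustrative sketch (names are placeholders):
 *
 *   float32x4_t v   = {1.0f, 2.0f, 3.0f, 4.0f};
 *   float32_t   sum = vaddvq_f32(v);   // 10.0f
 */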
  42259. #ifdef __LITTLE_ENDIAN__
  42260. __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
  42261. poly64x1_t __ret;
  42262. __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
  42263. return __ret;
  42264. }
  42265. #else
  42266. __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
  42267. poly64x1_t __ret;
  42268. __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
  42269. return __ret;
  42270. }
  42271. #endif
  42272. #ifdef __LITTLE_ENDIAN__
  42273. __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
  42274. poly64x2_t __ret;
  42275. __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
  42276. return __ret;
  42277. }
  42278. #else
  42279. __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
  42280. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42281. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42282. poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  42283. poly64x2_t __ret;
  42284. __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
  42285. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42286. return __ret;
  42287. }
  42288. #endif
  42289. #ifdef __LITTLE_ENDIAN__
  42290. __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  42291. float64x2_t __ret;
  42292. __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
  42293. return __ret;
  42294. }
  42295. #else
  42296. __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  42297. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42298. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42299. float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  42300. float64x2_t __ret;
  42301. __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
  42302. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42303. return __ret;
  42304. }
  42305. #endif
  42306. #ifdef __LITTLE_ENDIAN__
  42307. __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  42308. float64x1_t __ret;
  42309. __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
  42310. return __ret;
  42311. }
  42312. #else
  42313. __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  42314. float64x1_t __ret;
  42315. __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
  42316. return __ret;
  42317. }
  42318. #endif
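/*
 * vbsl[q]_p64 and vbsl[q]_f64 above extend the bitwise-select family to the
 * AArch64-only poly64 and float64 element types.  Bit for bit, the result is
 * (__p0 & __p1) | (~__p0 & __p2): wherever a mask bit in __p0 is set the bit
 * comes from __p1, otherwise from __p2.
 *
 * Illustrative sketch (names are placeholders):
 *
 *   float64x2_t a, b;
 *   uint64x2_t  mask = vceqq_f64(a, a);        // all-ones where a is not NaN
 *   float64x2_t r    = vbslq_f64(mask, a, b);  // pick a where the mask is set, else b
 */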
  42319. #ifdef __LITTLE_ENDIAN__
  42320. __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
  42321. uint64x2_t __ret;
  42322. __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  42323. return __ret;
  42324. }
  42325. #else
  42326. __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
  42327. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42328. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42329. uint64x2_t __ret;
  42330. __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  42331. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42332. return __ret;
  42333. }
  42334. #endif
  42335. #ifdef __LITTLE_ENDIAN__
  42336. __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
  42337. uint64x1_t __ret;
  42338. __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  42339. return __ret;
  42340. }
  42341. #else
  42342. __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
  42343. uint64x1_t __ret;
  42344. __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  42345. return __ret;
  42346. }
  42347. #endif
  42348. #ifdef __LITTLE_ENDIAN__
  42349. __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
  42350. uint64_t __ret;
  42351. __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
  42352. return __ret;
  42353. }
  42354. #else
  42355. __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
  42356. uint64_t __ret;
  42357. __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
  42358. return __ret;
  42359. }
  42360. #endif
  42361. #ifdef __LITTLE_ENDIAN__
  42362. __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
  42363. uint32_t __ret;
  42364. __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
  42365. return __ret;
  42366. }
  42367. #else
  42368. __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
  42369. uint32_t __ret;
  42370. __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
  42371. return __ret;
  42372. }
  42373. #endif
  42374. #ifdef __LITTLE_ENDIAN__
  42375. __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
  42376. uint64x2_t __ret;
  42377. __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  42378. return __ret;
  42379. }
  42380. #else
  42381. __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
  42382. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42383. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42384. uint64x2_t __ret;
  42385. __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  42386. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42387. return __ret;
  42388. }
  42389. #endif
  42390. #ifdef __LITTLE_ENDIAN__
  42391. __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
  42392. uint64x1_t __ret;
  42393. __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  42394. return __ret;
  42395. }
  42396. #else
  42397. __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
  42398. uint64x1_t __ret;
  42399. __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  42400. return __ret;
  42401. }
  42402. #endif
  42403. #ifdef __LITTLE_ENDIAN__
  42404. __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
  42405. uint64_t __ret;
  42406. __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
  42407. return __ret;
  42408. }
  42409. #else
  42410. __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
  42411. uint64_t __ret;
  42412. __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
  42413. return __ret;
  42414. }
  42415. #endif
  42416. #ifdef __LITTLE_ENDIAN__
  42417. __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
  42418. uint32_t __ret;
  42419. __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
  42420. return __ret;
  42421. }
  42422. #else
  42423. __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
  42424. uint32_t __ret;
  42425. __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
  42426. return __ret;
  42427. }
  42428. #endif
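/* vcale*: floating-point absolute compare less-than-or-equal (|a| <= |b|),
 * i.e. vcage with the operands swapped; typically lowered to FACGE. */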
  42429. #ifdef __LITTLE_ENDIAN__
  42430. __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
  42431. uint64x2_t __ret;
  42432. __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  42433. return __ret;
  42434. }
  42435. #else
  42436. __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
  42437. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42438. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42439. uint64x2_t __ret;
  42440. __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  42441. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42442. return __ret;
  42443. }
  42444. #endif
  42445. #ifdef __LITTLE_ENDIAN__
  42446. __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
  42447. uint64x1_t __ret;
  42448. __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  42449. return __ret;
  42450. }
  42451. #else
  42452. __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
  42453. uint64x1_t __ret;
  42454. __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  42455. return __ret;
  42456. }
  42457. #endif
  42458. #ifdef __LITTLE_ENDIAN__
  42459. __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
  42460. uint64_t __ret;
  42461. __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
  42462. return __ret;
  42463. }
  42464. #else
  42465. __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
  42466. uint64_t __ret;
  42467. __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
  42468. return __ret;
  42469. }
  42470. #endif
  42471. #ifdef __LITTLE_ENDIAN__
  42472. __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
  42473. uint32_t __ret;
  42474. __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
  42475. return __ret;
  42476. }
  42477. #else
  42478. __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
  42479. uint32_t __ret;
  42480. __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
  42481. return __ret;
  42482. }
  42483. #endif
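/* vcalt*: floating-point absolute compare less-than (|a| < |b|), i.e. vcagt
 * with the operands swapped; typically lowered to FACGT. */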
  42484. #ifdef __LITTLE_ENDIAN__
  42485. __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
  42486. uint64x2_t __ret;
  42487. __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  42488. return __ret;
  42489. }
  42490. #else
  42491. __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
  42492. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42493. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42494. uint64x2_t __ret;
  42495. __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  42496. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42497. return __ret;
  42498. }
  42499. #endif
  42500. #ifdef __LITTLE_ENDIAN__
  42501. __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
  42502. uint64x1_t __ret;
  42503. __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  42504. return __ret;
  42505. }
  42506. #else
  42507. __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
  42508. uint64x1_t __ret;
  42509. __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  42510. return __ret;
  42511. }
  42512. #endif
  42513. #ifdef __LITTLE_ENDIAN__
  42514. __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
  42515. uint64_t __ret;
  42516. __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
  42517. return __ret;
  42518. }
  42519. #else
  42520. __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
  42521. uint64_t __ret;
  42522. __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
  42523. return __ret;
  42524. }
  42525. #endif
  42526. #ifdef __LITTLE_ENDIAN__
  42527. __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
  42528. uint32_t __ret;
  42529. __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
  42530. return __ret;
  42531. }
  42532. #else
  42533. __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
  42534. uint32_t __ret;
  42535. __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
  42536. return __ret;
  42537. }
  42538. #endif
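/* vceq* for 64-bit elements (AArch64 additions): lane-wise equality producing
 * an all-ones/all-zeros mask per lane. The vector forms use the C == operator
 * on vector types directly, so no builtin is needed; the scalar
 * vceqd_u64/vceqd_s64/vceqd_f64 and vceqs_f32 forms call dedicated builtins. */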
  42539. #ifdef __LITTLE_ENDIAN__
  42540. __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
  42541. uint64x1_t __ret;
  42542. __ret = (uint64x1_t)(__p0 == __p1);
  42543. return __ret;
  42544. }
  42545. #else
  42546. __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
  42547. uint64x1_t __ret;
  42548. __ret = (uint64x1_t)(__p0 == __p1);
  42549. return __ret;
  42550. }
  42551. #endif
  42552. #ifdef __LITTLE_ENDIAN__
  42553. __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
  42554. uint64x2_t __ret;
  42555. __ret = (uint64x2_t)(__p0 == __p1);
  42556. return __ret;
  42557. }
  42558. #else
  42559. __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
  42560. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42561. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42562. uint64x2_t __ret;
  42563. __ret = (uint64x2_t)(__rev0 == __rev1);
  42564. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42565. return __ret;
  42566. }
  42567. #endif
  42568. #ifdef __LITTLE_ENDIAN__
  42569. __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  42570. uint64x2_t __ret;
  42571. __ret = (uint64x2_t)(__p0 == __p1);
  42572. return __ret;
  42573. }
  42574. #else
  42575. __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  42576. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42577. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42578. uint64x2_t __ret;
  42579. __ret = (uint64x2_t)(__rev0 == __rev1);
  42580. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42581. return __ret;
  42582. }
  42583. #endif
  42584. #ifdef __LITTLE_ENDIAN__
  42585. __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
  42586. uint64x2_t __ret;
  42587. __ret = (uint64x2_t)(__p0 == __p1);
  42588. return __ret;
  42589. }
  42590. #else
  42591. __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
  42592. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42593. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42594. uint64x2_t __ret;
  42595. __ret = (uint64x2_t)(__rev0 == __rev1);
  42596. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42597. return __ret;
  42598. }
  42599. #endif
  42600. #ifdef __LITTLE_ENDIAN__
  42601. __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
  42602. uint64x2_t __ret;
  42603. __ret = (uint64x2_t)(__p0 == __p1);
  42604. return __ret;
  42605. }
  42606. #else
  42607. __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
  42608. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42609. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  42610. uint64x2_t __ret;
  42611. __ret = (uint64x2_t)(__rev0 == __rev1);
  42612. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42613. return __ret;
  42614. }
  42615. #endif
  42616. #ifdef __LITTLE_ENDIAN__
  42617. __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
  42618. uint64x1_t __ret;
  42619. __ret = (uint64x1_t)(__p0 == __p1);
  42620. return __ret;
  42621. }
  42622. #else
  42623. __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
  42624. uint64x1_t __ret;
  42625. __ret = (uint64x1_t)(__p0 == __p1);
  42626. return __ret;
  42627. }
  42628. #endif
  42629. #ifdef __LITTLE_ENDIAN__
  42630. __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
  42631. uint64x1_t __ret;
  42632. __ret = (uint64x1_t)(__p0 == __p1);
  42633. return __ret;
  42634. }
  42635. #else
  42636. __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
  42637. uint64x1_t __ret;
  42638. __ret = (uint64x1_t)(__p0 == __p1);
  42639. return __ret;
  42640. }
  42641. #endif
  42642. #ifdef __LITTLE_ENDIAN__
  42643. __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
  42644. uint64x1_t __ret;
  42645. __ret = (uint64x1_t)(__p0 == __p1);
  42646. return __ret;
  42647. }
  42648. #else
  42649. __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
  42650. uint64x1_t __ret;
  42651. __ret = (uint64x1_t)(__p0 == __p1);
  42652. return __ret;
  42653. }
  42654. #endif
  42655. #ifdef __LITTLE_ENDIAN__
  42656. __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
  42657. uint64_t __ret;
  42658. __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
  42659. return __ret;
  42660. }
  42661. #else
  42662. __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
  42663. uint64_t __ret;
  42664. __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
  42665. return __ret;
  42666. }
  42667. #endif
  42668. #ifdef __LITTLE_ENDIAN__
  42669. __ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
  42670. int64_t __ret;
  42671. __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
  42672. return __ret;
  42673. }
  42674. #else
  42675. __ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
  42676. int64_t __ret;
  42677. __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
  42678. return __ret;
  42679. }
  42680. #endif
  42681. #ifdef __LITTLE_ENDIAN__
  42682. __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
  42683. uint64_t __ret;
  42684. __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
  42685. return __ret;
  42686. }
  42687. #else
  42688. __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
  42689. uint64_t __ret;
  42690. __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
  42691. return __ret;
  42692. }
  42693. #endif
  42694. #ifdef __LITTLE_ENDIAN__
  42695. __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
  42696. uint32_t __ret;
  42697. __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
  42698. return __ret;
  42699. }
  42700. #else
  42701. __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
  42702. uint32_t __ret;
  42703. __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
  42704. return __ret;
  42705. }
  42706. #endif
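/* vceqz*: compare each element (or scalar) against zero; a lane becomes all
 * ones when it equals zero and all zeros otherwise. Defined for polynomial,
 * integer and floating-point element types. */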
  42707. #ifdef __LITTLE_ENDIAN__
  42708. __ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
  42709. uint8x8_t __ret;
  42710. __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
  42711. return __ret;
  42712. }
  42713. #else
  42714. __ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
  42715. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42716. uint8x8_t __ret;
  42717. __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
  42718. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  42719. return __ret;
  42720. }
  42721. #endif
  42722. #ifdef __LITTLE_ENDIAN__
  42723. __ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
  42724. uint64x1_t __ret;
  42725. __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
  42726. return __ret;
  42727. }
  42728. #else
  42729. __ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
  42730. uint64x1_t __ret;
  42731. __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
  42732. return __ret;
  42733. }
  42734. #endif
  42735. #ifdef __LITTLE_ENDIAN__
  42736. __ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
  42737. uint16x4_t __ret;
  42738. __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
  42739. return __ret;
  42740. }
  42741. #else
  42742. __ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
  42743. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42744. uint16x4_t __ret;
  42745. __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
  42746. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  42747. return __ret;
  42748. }
  42749. #endif
  42750. #ifdef __LITTLE_ENDIAN__
  42751. __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
  42752. uint8x16_t __ret;
  42753. __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
  42754. return __ret;
  42755. }
  42756. #else
  42757. __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
  42758. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  42759. uint8x16_t __ret;
  42760. __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
  42761. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  42762. return __ret;
  42763. }
  42764. #endif
  42765. #ifdef __LITTLE_ENDIAN__
  42766. __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
  42767. uint64x2_t __ret;
  42768. __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
  42769. return __ret;
  42770. }
  42771. #else
  42772. __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
  42773. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42774. uint64x2_t __ret;
  42775. __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
  42776. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42777. return __ret;
  42778. }
  42779. #endif
  42780. #ifdef __LITTLE_ENDIAN__
  42781. __ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
  42782. uint16x8_t __ret;
  42783. __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
  42784. return __ret;
  42785. }
  42786. #else
  42787. __ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
  42788. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42789. uint16x8_t __ret;
  42790. __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
  42791. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  42792. return __ret;
  42793. }
  42794. #endif
  42795. #ifdef __LITTLE_ENDIAN__
  42796. __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
  42797. uint8x16_t __ret;
  42798. __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
  42799. return __ret;
  42800. }
  42801. #else
  42802. __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
  42803. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  42804. uint8x16_t __ret;
  42805. __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
  42806. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  42807. return __ret;
  42808. }
  42809. #endif
  42810. #ifdef __LITTLE_ENDIAN__
  42811. __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
  42812. uint32x4_t __ret;
  42813. __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
  42814. return __ret;
  42815. }
  42816. #else
  42817. __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
  42818. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42819. uint32x4_t __ret;
  42820. __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
  42821. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  42822. return __ret;
  42823. }
  42824. #endif
  42825. #ifdef __LITTLE_ENDIAN__
  42826. __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
  42827. uint64x2_t __ret;
  42828. __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
  42829. return __ret;
  42830. }
  42831. #else
  42832. __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
  42833. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42834. uint64x2_t __ret;
  42835. __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
  42836. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42837. return __ret;
  42838. }
  42839. #endif
  42840. #ifdef __LITTLE_ENDIAN__
  42841. __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
  42842. uint16x8_t __ret;
  42843. __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
  42844. return __ret;
  42845. }
  42846. #else
  42847. __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
  42848. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42849. uint16x8_t __ret;
  42850. __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
  42851. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  42852. return __ret;
  42853. }
  42854. #endif
  42855. #ifdef __LITTLE_ENDIAN__
  42856. __ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
  42857. uint8x16_t __ret;
  42858. __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
  42859. return __ret;
  42860. }
  42861. #else
  42862. __ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
  42863. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  42864. uint8x16_t __ret;
  42865. __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
  42866. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  42867. return __ret;
  42868. }
  42869. #endif
  42870. #ifdef __LITTLE_ENDIAN__
  42871. __ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
  42872. uint64x2_t __ret;
  42873. __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
  42874. return __ret;
  42875. }
  42876. #else
  42877. __ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
  42878. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42879. uint64x2_t __ret;
  42880. __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
  42881. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42882. return __ret;
  42883. }
  42884. #endif
  42885. #ifdef __LITTLE_ENDIAN__
  42886. __ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
  42887. uint32x4_t __ret;
  42888. __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
  42889. return __ret;
  42890. }
  42891. #else
  42892. __ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
  42893. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42894. uint32x4_t __ret;
  42895. __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
  42896. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  42897. return __ret;
  42898. }
  42899. #endif
  42900. #ifdef __LITTLE_ENDIAN__
  42901. __ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
  42902. uint32x4_t __ret;
  42903. __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
  42904. return __ret;
  42905. }
  42906. #else
  42907. __ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
  42908. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42909. uint32x4_t __ret;
  42910. __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
  42911. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  42912. return __ret;
  42913. }
  42914. #endif
  42915. #ifdef __LITTLE_ENDIAN__
  42916. __ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
  42917. uint64x2_t __ret;
  42918. __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
  42919. return __ret;
  42920. }
  42921. #else
  42922. __ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
  42923. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42924. uint64x2_t __ret;
  42925. __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
  42926. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42927. return __ret;
  42928. }
  42929. #endif
  42930. #ifdef __LITTLE_ENDIAN__
  42931. __ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
  42932. uint16x8_t __ret;
  42933. __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
  42934. return __ret;
  42935. }
  42936. #else
  42937. __ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
  42938. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42939. uint16x8_t __ret;
  42940. __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
  42941. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  42942. return __ret;
  42943. }
  42944. #endif
  42945. #ifdef __LITTLE_ENDIAN__
  42946. __ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
  42947. uint8x8_t __ret;
  42948. __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
  42949. return __ret;
  42950. }
  42951. #else
  42952. __ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
  42953. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  42954. uint8x8_t __ret;
  42955. __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
  42956. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  42957. return __ret;
  42958. }
  42959. #endif
  42960. #ifdef __LITTLE_ENDIAN__
  42961. __ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
  42962. uint32x2_t __ret;
  42963. __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
  42964. return __ret;
  42965. }
  42966. #else
  42967. __ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
  42968. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  42969. uint32x2_t __ret;
  42970. __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
  42971. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  42972. return __ret;
  42973. }
  42974. #endif
  42975. #ifdef __LITTLE_ENDIAN__
  42976. __ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
  42977. uint64x1_t __ret;
  42978. __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
  42979. return __ret;
  42980. }
  42981. #else
  42982. __ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
  42983. uint64x1_t __ret;
  42984. __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
  42985. return __ret;
  42986. }
  42987. #endif
  42988. #ifdef __LITTLE_ENDIAN__
  42989. __ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
  42990. uint16x4_t __ret;
  42991. __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
  42992. return __ret;
  42993. }
  42994. #else
  42995. __ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
  42996. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  42997. uint16x4_t __ret;
  42998. __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
  42999. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  43000. return __ret;
  43001. }
  43002. #endif
  43003. #ifdef __LITTLE_ENDIAN__
  43004. __ai uint8x8_t vceqz_s8(int8x8_t __p0) {
  43005. uint8x8_t __ret;
  43006. __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
  43007. return __ret;
  43008. }
  43009. #else
  43010. __ai uint8x8_t vceqz_s8(int8x8_t __p0) {
  43011. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  43012. uint8x8_t __ret;
  43013. __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
  43014. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  43015. return __ret;
  43016. }
  43017. #endif
  43018. #ifdef __LITTLE_ENDIAN__
  43019. __ai uint64x1_t vceqz_f64(float64x1_t __p0) {
  43020. uint64x1_t __ret;
  43021. __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
  43022. return __ret;
  43023. }
  43024. #else
  43025. __ai uint64x1_t vceqz_f64(float64x1_t __p0) {
  43026. uint64x1_t __ret;
  43027. __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
  43028. return __ret;
  43029. }
  43030. #endif
  43031. #ifdef __LITTLE_ENDIAN__
  43032. __ai uint32x2_t vceqz_f32(float32x2_t __p0) {
  43033. uint32x2_t __ret;
  43034. __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
  43035. return __ret;
  43036. }
  43037. #else
  43038. __ai uint32x2_t vceqz_f32(float32x2_t __p0) {
  43039. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43040. uint32x2_t __ret;
  43041. __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
  43042. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43043. return __ret;
  43044. }
  43045. #endif
  43046. #ifdef __LITTLE_ENDIAN__
  43047. __ai uint32x2_t vceqz_s32(int32x2_t __p0) {
  43048. uint32x2_t __ret;
  43049. __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
  43050. return __ret;
  43051. }
  43052. #else
  43053. __ai uint32x2_t vceqz_s32(int32x2_t __p0) {
  43054. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43055. uint32x2_t __ret;
  43056. __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
  43057. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43058. return __ret;
  43059. }
  43060. #endif
  43061. #ifdef __LITTLE_ENDIAN__
  43062. __ai uint64x1_t vceqz_s64(int64x1_t __p0) {
  43063. uint64x1_t __ret;
  43064. __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
  43065. return __ret;
  43066. }
  43067. #else
  43068. __ai uint64x1_t vceqz_s64(int64x1_t __p0) {
  43069. uint64x1_t __ret;
  43070. __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
  43071. return __ret;
  43072. }
  43073. #endif
  43074. #ifdef __LITTLE_ENDIAN__
  43075. __ai uint16x4_t vceqz_s16(int16x4_t __p0) {
  43076. uint16x4_t __ret;
  43077. __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
  43078. return __ret;
  43079. }
  43080. #else
  43081. __ai uint16x4_t vceqz_s16(int16x4_t __p0) {
  43082. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  43083. uint16x4_t __ret;
  43084. __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
  43085. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  43086. return __ret;
  43087. }
  43088. #endif
  43089. #ifdef __LITTLE_ENDIAN__
  43090. __ai uint64_t vceqzd_u64(uint64_t __p0) {
  43091. uint64_t __ret;
  43092. __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
  43093. return __ret;
  43094. }
  43095. #else
  43096. __ai uint64_t vceqzd_u64(uint64_t __p0) {
  43097. uint64_t __ret;
  43098. __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
  43099. return __ret;
  43100. }
  43101. #endif
  43102. #ifdef __LITTLE_ENDIAN__
  43103. __ai int64_t vceqzd_s64(int64_t __p0) {
  43104. int64_t __ret;
  43105. __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
  43106. return __ret;
  43107. }
  43108. #else
  43109. __ai int64_t vceqzd_s64(int64_t __p0) {
  43110. int64_t __ret;
  43111. __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
  43112. return __ret;
  43113. }
  43114. #endif
  43115. #ifdef __LITTLE_ENDIAN__
  43116. __ai uint64_t vceqzd_f64(float64_t __p0) {
  43117. uint64_t __ret;
  43118. __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
  43119. return __ret;
  43120. }
  43121. #else
  43122. __ai uint64_t vceqzd_f64(float64_t __p0) {
  43123. uint64_t __ret;
  43124. __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
  43125. return __ret;
  43126. }
  43127. #endif
  43128. #ifdef __LITTLE_ENDIAN__
  43129. __ai uint32_t vceqzs_f32(float32_t __p0) {
  43130. uint32_t __ret;
  43131. __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
  43132. return __ret;
  43133. }
  43134. #else
  43135. __ai uint32_t vceqzs_f32(float32_t __p0) {
  43136. uint32_t __ret;
  43137. __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
  43138. return __ret;
  43139. }
  43140. #endif
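/* vcge* for 64-bit elements: lane-wise greater-than-or-equal comparisons for
 * unsigned, signed and floating-point vectors (operator-based), plus the
 * scalar vcged_u64/vcged_s64/vcged_f64 and vcges_f32 builtins. */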
  43141. #ifdef __LITTLE_ENDIAN__
  43142. __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  43143. uint64x2_t __ret;
  43144. __ret = (uint64x2_t)(__p0 >= __p1);
  43145. return __ret;
  43146. }
  43147. #else
  43148. __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  43149. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43150. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43151. uint64x2_t __ret;
  43152. __ret = (uint64x2_t)(__rev0 >= __rev1);
  43153. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43154. return __ret;
  43155. }
  43156. #endif
  43157. #ifdef __LITTLE_ENDIAN__
  43158. __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
  43159. uint64x2_t __ret;
  43160. __ret = (uint64x2_t)(__p0 >= __p1);
  43161. return __ret;
  43162. }
  43163. #else
  43164. __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
  43165. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43166. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43167. uint64x2_t __ret;
  43168. __ret = (uint64x2_t)(__rev0 >= __rev1);
  43169. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43170. return __ret;
  43171. }
  43172. #endif
  43173. #ifdef __LITTLE_ENDIAN__
  43174. __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
  43175. uint64x2_t __ret;
  43176. __ret = (uint64x2_t)(__p0 >= __p1);
  43177. return __ret;
  43178. }
  43179. #else
  43180. __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
  43181. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43182. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43183. uint64x2_t __ret;
  43184. __ret = (uint64x2_t)(__rev0 >= __rev1);
  43185. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43186. return __ret;
  43187. }
  43188. #endif
  43189. #ifdef __LITTLE_ENDIAN__
  43190. __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
  43191. uint64x1_t __ret;
  43192. __ret = (uint64x1_t)(__p0 >= __p1);
  43193. return __ret;
  43194. }
  43195. #else
  43196. __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
  43197. uint64x1_t __ret;
  43198. __ret = (uint64x1_t)(__p0 >= __p1);
  43199. return __ret;
  43200. }
  43201. #endif
  43202. #ifdef __LITTLE_ENDIAN__
  43203. __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
  43204. uint64x1_t __ret;
  43205. __ret = (uint64x1_t)(__p0 >= __p1);
  43206. return __ret;
  43207. }
  43208. #else
  43209. __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
  43210. uint64x1_t __ret;
  43211. __ret = (uint64x1_t)(__p0 >= __p1);
  43212. return __ret;
  43213. }
  43214. #endif
  43215. #ifdef __LITTLE_ENDIAN__
  43216. __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
  43217. uint64x1_t __ret;
  43218. __ret = (uint64x1_t)(__p0 >= __p1);
  43219. return __ret;
  43220. }
  43221. #else
  43222. __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
  43223. uint64x1_t __ret;
  43224. __ret = (uint64x1_t)(__p0 >= __p1);
  43225. return __ret;
  43226. }
  43227. #endif
  43228. #ifdef __LITTLE_ENDIAN__
  43229. __ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
  43230. int64_t __ret;
  43231. __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
  43232. return __ret;
  43233. }
  43234. #else
  43235. __ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
  43236. int64_t __ret;
  43237. __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
  43238. return __ret;
  43239. }
  43240. #endif
  43241. #ifdef __LITTLE_ENDIAN__
  43242. __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
  43243. uint64_t __ret;
  43244. __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
  43245. return __ret;
  43246. }
  43247. #else
  43248. __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
  43249. uint64_t __ret;
  43250. __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
  43251. return __ret;
  43252. }
  43253. #endif
  43254. #ifdef __LITTLE_ENDIAN__
  43255. __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
  43256. uint64_t __ret;
  43257. __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
  43258. return __ret;
  43259. }
  43260. #else
  43261. __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
  43262. uint64_t __ret;
  43263. __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
  43264. return __ret;
  43265. }
  43266. #endif
  43267. #ifdef __LITTLE_ENDIAN__
  43268. __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
  43269. uint32_t __ret;
  43270. __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
  43271. return __ret;
  43272. }
  43273. #else
  43274. __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
  43275. uint32_t __ret;
  43276. __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
  43277. return __ret;
  43278. }
  43279. #endif
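/* vcgez*: compare greater-than-or-equal to zero for signed integer and
 * floating-point types; a result lane is all ones where the element is >= 0. */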
  43280. #ifdef __LITTLE_ENDIAN__
  43281. __ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
  43282. uint8x16_t __ret;
  43283. __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
  43284. return __ret;
  43285. }
  43286. #else
  43287. __ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
  43288. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  43289. uint8x16_t __ret;
  43290. __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
  43291. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  43292. return __ret;
  43293. }
  43294. #endif
  43295. #ifdef __LITTLE_ENDIAN__
  43296. __ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
  43297. uint64x2_t __ret;
  43298. __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
  43299. return __ret;
  43300. }
  43301. #else
  43302. __ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
  43303. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43304. uint64x2_t __ret;
  43305. __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
  43306. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43307. return __ret;
  43308. }
  43309. #endif
  43310. #ifdef __LITTLE_ENDIAN__
  43311. __ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
  43312. uint32x4_t __ret;
  43313. __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
  43314. return __ret;
  43315. }
  43316. #else
  43317. __ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
  43318. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  43319. uint32x4_t __ret;
  43320. __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
  43321. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  43322. return __ret;
  43323. }
  43324. #endif
  43325. #ifdef __LITTLE_ENDIAN__
  43326. __ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
  43327. uint32x4_t __ret;
  43328. __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
  43329. return __ret;
  43330. }
  43331. #else
  43332. __ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
  43333. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  43334. uint32x4_t __ret;
  43335. __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
  43336. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  43337. return __ret;
  43338. }
  43339. #endif
  43340. #ifdef __LITTLE_ENDIAN__
  43341. __ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
  43342. uint64x2_t __ret;
  43343. __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
  43344. return __ret;
  43345. }
  43346. #else
  43347. __ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
  43348. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43349. uint64x2_t __ret;
  43350. __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
  43351. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43352. return __ret;
  43353. }
  43354. #endif
  43355. #ifdef __LITTLE_ENDIAN__
  43356. __ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
  43357. uint16x8_t __ret;
  43358. __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
  43359. return __ret;
  43360. }
  43361. #else
  43362. __ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
  43363. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  43364. uint16x8_t __ret;
  43365. __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
  43366. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  43367. return __ret;
  43368. }
  43369. #endif
  43370. #ifdef __LITTLE_ENDIAN__
  43371. __ai uint8x8_t vcgez_s8(int8x8_t __p0) {
  43372. uint8x8_t __ret;
  43373. __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
  43374. return __ret;
  43375. }
  43376. #else
  43377. __ai uint8x8_t vcgez_s8(int8x8_t __p0) {
  43378. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  43379. uint8x8_t __ret;
  43380. __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
  43381. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  43382. return __ret;
  43383. }
  43384. #endif
  43385. #ifdef __LITTLE_ENDIAN__
  43386. __ai uint64x1_t vcgez_f64(float64x1_t __p0) {
  43387. uint64x1_t __ret;
  43388. __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
  43389. return __ret;
  43390. }
  43391. #else
  43392. __ai uint64x1_t vcgez_f64(float64x1_t __p0) {
  43393. uint64x1_t __ret;
  43394. __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
  43395. return __ret;
  43396. }
  43397. #endif
  43398. #ifdef __LITTLE_ENDIAN__
  43399. __ai uint32x2_t vcgez_f32(float32x2_t __p0) {
  43400. uint32x2_t __ret;
  43401. __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
  43402. return __ret;
  43403. }
  43404. #else
  43405. __ai uint32x2_t vcgez_f32(float32x2_t __p0) {
  43406. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43407. uint32x2_t __ret;
  43408. __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
  43409. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43410. return __ret;
  43411. }
  43412. #endif
  43413. #ifdef __LITTLE_ENDIAN__
  43414. __ai uint32x2_t vcgez_s32(int32x2_t __p0) {
  43415. uint32x2_t __ret;
  43416. __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
  43417. return __ret;
  43418. }
  43419. #else
  43420. __ai uint32x2_t vcgez_s32(int32x2_t __p0) {
  43421. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43422. uint32x2_t __ret;
  43423. __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
  43424. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43425. return __ret;
  43426. }
  43427. #endif
  43428. #ifdef __LITTLE_ENDIAN__
  43429. __ai uint64x1_t vcgez_s64(int64x1_t __p0) {
  43430. uint64x1_t __ret;
  43431. __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
  43432. return __ret;
  43433. }
  43434. #else
  43435. __ai uint64x1_t vcgez_s64(int64x1_t __p0) {
  43436. uint64x1_t __ret;
  43437. __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
  43438. return __ret;
  43439. }
  43440. #endif
  43441. #ifdef __LITTLE_ENDIAN__
  43442. __ai uint16x4_t vcgez_s16(int16x4_t __p0) {
  43443. uint16x4_t __ret;
  43444. __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
  43445. return __ret;
  43446. }
  43447. #else
  43448. __ai uint16x4_t vcgez_s16(int16x4_t __p0) {
  43449. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  43450. uint16x4_t __ret;
  43451. __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
  43452. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  43453. return __ret;
  43454. }
  43455. #endif
  43456. #ifdef __LITTLE_ENDIAN__
  43457. __ai int64_t vcgezd_s64(int64_t __p0) {
  43458. int64_t __ret;
  43459. __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
  43460. return __ret;
  43461. }
  43462. #else
  43463. __ai int64_t vcgezd_s64(int64_t __p0) {
  43464. int64_t __ret;
  43465. __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
  43466. return __ret;
  43467. }
  43468. #endif
  43469. #ifdef __LITTLE_ENDIAN__
  43470. __ai uint64_t vcgezd_f64(float64_t __p0) {
  43471. uint64_t __ret;
  43472. __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
  43473. return __ret;
  43474. }
  43475. #else
  43476. __ai uint64_t vcgezd_f64(float64_t __p0) {
  43477. uint64_t __ret;
  43478. __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
  43479. return __ret;
  43480. }
  43481. #endif
  43482. #ifdef __LITTLE_ENDIAN__
  43483. __ai uint32_t vcgezs_f32(float32_t __p0) {
  43484. uint32_t __ret;
  43485. __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
  43486. return __ret;
  43487. }
  43488. #else
  43489. __ai uint32_t vcgezs_f32(float32_t __p0) {
  43490. uint32_t __ret;
  43491. __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
  43492. return __ret;
  43493. }
  43494. #endif
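/* vcgt* for 64-bit elements: lane-wise greater-than comparisons (unsigned,
 * signed, floating-point), plus the scalar vcgtd_u64/vcgtd_s64/vcgtd_f64 and
 * vcgts_f32 builtins. */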
  43495. #ifdef __LITTLE_ENDIAN__
  43496. __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  43497. uint64x2_t __ret;
  43498. __ret = (uint64x2_t)(__p0 > __p1);
  43499. return __ret;
  43500. }
  43501. #else
  43502. __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  43503. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43504. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43505. uint64x2_t __ret;
  43506. __ret = (uint64x2_t)(__rev0 > __rev1);
  43507. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43508. return __ret;
  43509. }
  43510. #endif
  43511. #ifdef __LITTLE_ENDIAN__
  43512. __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
  43513. uint64x2_t __ret;
  43514. __ret = (uint64x2_t)(__p0 > __p1);
  43515. return __ret;
  43516. }
  43517. #else
  43518. __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
  43519. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43520. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43521. uint64x2_t __ret;
  43522. __ret = (uint64x2_t)(__rev0 > __rev1);
  43523. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43524. return __ret;
  43525. }
  43526. #endif
  43527. #ifdef __LITTLE_ENDIAN__
  43528. __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
  43529. uint64x2_t __ret;
  43530. __ret = (uint64x2_t)(__p0 > __p1);
  43531. return __ret;
  43532. }
  43533. #else
  43534. __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
  43535. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43536. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43537. uint64x2_t __ret;
  43538. __ret = (uint64x2_t)(__rev0 > __rev1);
  43539. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43540. return __ret;
  43541. }
  43542. #endif
  43543. #ifdef __LITTLE_ENDIAN__
  43544. __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
  43545. uint64x1_t __ret;
  43546. __ret = (uint64x1_t)(__p0 > __p1);
  43547. return __ret;
  43548. }
  43549. #else
  43550. __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
  43551. uint64x1_t __ret;
  43552. __ret = (uint64x1_t)(__p0 > __p1);
  43553. return __ret;
  43554. }
  43555. #endif
  43556. #ifdef __LITTLE_ENDIAN__
  43557. __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
  43558. uint64x1_t __ret;
  43559. __ret = (uint64x1_t)(__p0 > __p1);
  43560. return __ret;
  43561. }
  43562. #else
  43563. __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
  43564. uint64x1_t __ret;
  43565. __ret = (uint64x1_t)(__p0 > __p1);
  43566. return __ret;
  43567. }
  43568. #endif
  43569. #ifdef __LITTLE_ENDIAN__
  43570. __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
  43571. uint64x1_t __ret;
  43572. __ret = (uint64x1_t)(__p0 > __p1);
  43573. return __ret;
  43574. }
  43575. #else
  43576. __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
  43577. uint64x1_t __ret;
  43578. __ret = (uint64x1_t)(__p0 > __p1);
  43579. return __ret;
  43580. }
  43581. #endif
  43582. #ifdef __LITTLE_ENDIAN__
  43583. __ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
  43584. int64_t __ret;
  43585. __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
  43586. return __ret;
  43587. }
  43588. #else
  43589. __ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
  43590. int64_t __ret;
  43591. __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
  43592. return __ret;
  43593. }
  43594. #endif
  43595. #ifdef __LITTLE_ENDIAN__
  43596. __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
  43597. uint64_t __ret;
  43598. __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
  43599. return __ret;
  43600. }
  43601. #else
  43602. __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
  43603. uint64_t __ret;
  43604. __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
  43605. return __ret;
  43606. }
  43607. #endif
  43608. #ifdef __LITTLE_ENDIAN__
  43609. __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
  43610. uint64_t __ret;
  43611. __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
  43612. return __ret;
  43613. }
  43614. #else
  43615. __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
  43616. uint64_t __ret;
  43617. __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
  43618. return __ret;
  43619. }
  43620. #endif
  43621. #ifdef __LITTLE_ENDIAN__
  43622. __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
  43623. uint32_t __ret;
  43624. __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
  43625. return __ret;
  43626. }
  43627. #else
  43628. __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
  43629. uint32_t __ret;
  43630. __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
  43631. return __ret;
  43632. }
  43633. #endif
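/* vcgtz*: compare greater-than zero for signed integer and floating-point
 * types. */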
  43634. #ifdef __LITTLE_ENDIAN__
  43635. __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
  43636. uint8x16_t __ret;
  43637. __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
  43638. return __ret;
  43639. }
  43640. #else
  43641. __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
  43642. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  43643. uint8x16_t __ret;
  43644. __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
  43645. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  43646. return __ret;
  43647. }
  43648. #endif
  43649. #ifdef __LITTLE_ENDIAN__
  43650. __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
  43651. uint64x2_t __ret;
  43652. __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
  43653. return __ret;
  43654. }
  43655. #else
  43656. __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
  43657. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43658. uint64x2_t __ret;
  43659. __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
  43660. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43661. return __ret;
  43662. }
  43663. #endif
  43664. #ifdef __LITTLE_ENDIAN__
  43665. __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
  43666. uint32x4_t __ret;
  43667. __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
  43668. return __ret;
  43669. }
  43670. #else
  43671. __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
  43672. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  43673. uint32x4_t __ret;
  43674. __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
  43675. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  43676. return __ret;
  43677. }
  43678. #endif
  43679. #ifdef __LITTLE_ENDIAN__
  43680. __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
  43681. uint32x4_t __ret;
  43682. __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
  43683. return __ret;
  43684. }
  43685. #else
  43686. __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
  43687. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  43688. uint32x4_t __ret;
  43689. __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
  43690. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  43691. return __ret;
  43692. }
  43693. #endif
  43694. #ifdef __LITTLE_ENDIAN__
  43695. __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
  43696. uint64x2_t __ret;
  43697. __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
  43698. return __ret;
  43699. }
  43700. #else
  43701. __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
  43702. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43703. uint64x2_t __ret;
  43704. __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
  43705. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43706. return __ret;
  43707. }
  43708. #endif
  43709. #ifdef __LITTLE_ENDIAN__
  43710. __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
  43711. uint16x8_t __ret;
  43712. __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
  43713. return __ret;
  43714. }
  43715. #else
  43716. __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
  43717. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  43718. uint16x8_t __ret;
  43719. __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
  43720. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  43721. return __ret;
  43722. }
  43723. #endif
  43724. #ifdef __LITTLE_ENDIAN__
  43725. __ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
  43726. uint8x8_t __ret;
  43727. __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
  43728. return __ret;
  43729. }
  43730. #else
  43731. __ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
  43732. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  43733. uint8x8_t __ret;
  43734. __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
  43735. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  43736. return __ret;
  43737. }
  43738. #endif
  43739. #ifdef __LITTLE_ENDIAN__
  43740. __ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
  43741. uint64x1_t __ret;
  43742. __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
  43743. return __ret;
  43744. }
  43745. #else
  43746. __ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
  43747. uint64x1_t __ret;
  43748. __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
  43749. return __ret;
  43750. }
  43751. #endif
  43752. #ifdef __LITTLE_ENDIAN__
  43753. __ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
  43754. uint32x2_t __ret;
  43755. __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
  43756. return __ret;
  43757. }
  43758. #else
  43759. __ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
  43760. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43761. uint32x2_t __ret;
  43762. __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
  43763. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43764. return __ret;
  43765. }
  43766. #endif
  43767. #ifdef __LITTLE_ENDIAN__
  43768. __ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
  43769. uint32x2_t __ret;
  43770. __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
  43771. return __ret;
  43772. }
  43773. #else
  43774. __ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
  43775. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43776. uint32x2_t __ret;
  43777. __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
  43778. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43779. return __ret;
  43780. }
  43781. #endif
  43782. #ifdef __LITTLE_ENDIAN__
  43783. __ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
  43784. uint64x1_t __ret;
  43785. __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
  43786. return __ret;
  43787. }
  43788. #else
  43789. __ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
  43790. uint64x1_t __ret;
  43791. __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
  43792. return __ret;
  43793. }
  43794. #endif
  43795. #ifdef __LITTLE_ENDIAN__
  43796. __ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
  43797. uint16x4_t __ret;
  43798. __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
  43799. return __ret;
  43800. }
  43801. #else
  43802. __ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
  43803. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  43804. uint16x4_t __ret;
  43805. __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
  43806. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  43807. return __ret;
  43808. }
  43809. #endif
  43810. #ifdef __LITTLE_ENDIAN__
  43811. __ai int64_t vcgtzd_s64(int64_t __p0) {
  43812. int64_t __ret;
  43813. __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
  43814. return __ret;
  43815. }
  43816. #else
  43817. __ai int64_t vcgtzd_s64(int64_t __p0) {
  43818. int64_t __ret;
  43819. __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
  43820. return __ret;
  43821. }
  43822. #endif
  43823. #ifdef __LITTLE_ENDIAN__
  43824. __ai uint64_t vcgtzd_f64(float64_t __p0) {
  43825. uint64_t __ret;
  43826. __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
  43827. return __ret;
  43828. }
  43829. #else
  43830. __ai uint64_t vcgtzd_f64(float64_t __p0) {
  43831. uint64_t __ret;
  43832. __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
  43833. return __ret;
  43834. }
  43835. #endif
  43836. #ifdef __LITTLE_ENDIAN__
  43837. __ai uint32_t vcgtzs_f32(float32_t __p0) {
  43838. uint32_t __ret;
  43839. __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
  43840. return __ret;
  43841. }
  43842. #else
  43843. __ai uint32_t vcgtzs_f32(float32_t __p0) {
  43844. uint32_t __ret;
  43845. __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
  43846. return __ret;
  43847. }
  43848. #endif
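/* vcle* for 64-bit elements: lane-wise less-than-or-equal comparisons
 * (unsigned, signed, floating-point), plus the scalar vcled_u64, vcled_s64,
 * vcled_f64 and vcles_f32 builtins. */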
  43849. #ifdef __LITTLE_ENDIAN__
  43850. __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  43851. uint64x2_t __ret;
  43852. __ret = (uint64x2_t)(__p0 <= __p1);
  43853. return __ret;
  43854. }
  43855. #else
  43856. __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  43857. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43858. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43859. uint64x2_t __ret;
  43860. __ret = (uint64x2_t)(__rev0 <= __rev1);
  43861. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43862. return __ret;
  43863. }
  43864. #endif
  43865. #ifdef __LITTLE_ENDIAN__
  43866. __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
  43867. uint64x2_t __ret;
  43868. __ret = (uint64x2_t)(__p0 <= __p1);
  43869. return __ret;
  43870. }
  43871. #else
  43872. __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
  43873. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43874. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43875. uint64x2_t __ret;
  43876. __ret = (uint64x2_t)(__rev0 <= __rev1);
  43877. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43878. return __ret;
  43879. }
  43880. #endif
  43881. #ifdef __LITTLE_ENDIAN__
  43882. __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
  43883. uint64x2_t __ret;
  43884. __ret = (uint64x2_t)(__p0 <= __p1);
  43885. return __ret;
  43886. }
  43887. #else
  43888. __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
  43889. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  43890. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  43891. uint64x2_t __ret;
  43892. __ret = (uint64x2_t)(__rev0 <= __rev1);
  43893. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  43894. return __ret;
  43895. }
  43896. #endif
  43897. #ifdef __LITTLE_ENDIAN__
  43898. __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
  43899. uint64x1_t __ret;
  43900. __ret = (uint64x1_t)(__p0 <= __p1);
  43901. return __ret;
  43902. }
  43903. #else
  43904. __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
  43905. uint64x1_t __ret;
  43906. __ret = (uint64x1_t)(__p0 <= __p1);
  43907. return __ret;
  43908. }
  43909. #endif
  43910. #ifdef __LITTLE_ENDIAN__
  43911. __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
  43912. uint64x1_t __ret;
  43913. __ret = (uint64x1_t)(__p0 <= __p1);
  43914. return __ret;
  43915. }
  43916. #else
  43917. __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
  43918. uint64x1_t __ret;
  43919. __ret = (uint64x1_t)(__p0 <= __p1);
  43920. return __ret;
  43921. }
  43922. #endif
  43923. #ifdef __LITTLE_ENDIAN__
  43924. __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
  43925. uint64x1_t __ret;
  43926. __ret = (uint64x1_t)(__p0 <= __p1);
  43927. return __ret;
  43928. }
  43929. #else
  43930. __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
  43931. uint64x1_t __ret;
  43932. __ret = (uint64x1_t)(__p0 <= __p1);
  43933. return __ret;
  43934. }
  43935. #endif
  43936. #ifdef __LITTLE_ENDIAN__
  43937. __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
  43938. uint64_t __ret;
  43939. __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
  43940. return __ret;
  43941. }
  43942. #else
  43943. __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
  43944. uint64_t __ret;
  43945. __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
  43946. return __ret;
  43947. }
  43948. #endif
  43949. #ifdef __LITTLE_ENDIAN__
  43950. __ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
  43951. int64_t __ret;
  43952. __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
  43953. return __ret;
  43954. }
  43955. #else
  43956. __ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
  43957. int64_t __ret;
  43958. __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
  43959. return __ret;
  43960. }
  43961. #endif
  43962. #ifdef __LITTLE_ENDIAN__
  43963. __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
  43964. uint64_t __ret;
  43965. __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
  43966. return __ret;
  43967. }
  43968. #else
  43969. __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
  43970. uint64_t __ret;
  43971. __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
  43972. return __ret;
  43973. }
  43974. #endif
  43975. #ifdef __LITTLE_ENDIAN__
  43976. __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
  43977. uint32_t __ret;
  43978. __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
  43979. return __ret;
  43980. }
  43981. #else
  43982. __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
  43983. uint32_t __ret;
  43984. __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
  43985. return __ret;
  43986. }
  43987. #endif
  43988. #ifdef __LITTLE_ENDIAN__
  43989. __ai uint8x16_t vclezq_s8(int8x16_t __p0) {
  43990. uint8x16_t __ret;
  43991. __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
  43992. return __ret;
  43993. }
  43994. #else
  43995. __ai uint8x16_t vclezq_s8(int8x16_t __p0) {
  43996. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  43997. uint8x16_t __ret;
  43998. __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
  43999. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  44000. return __ret;
  44001. }
  44002. #endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
return __ret;
}
#else
__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
return __ret;
}
#else
__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
return __ret;
}
#else
__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
return __ret;
}
#else
__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
return __ret;
}
#else
__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vclez_s8(int8x8_t __p0) {
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
return __ret;
}
#else
__ai uint8x8_t vclez_s8(int8x8_t __p0) {
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vclez_f64(float64x1_t __p0) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
return __ret;
}
#else
__ai uint64x1_t vclez_f64(float64x1_t __p0) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vclez_f32(float32x2_t __p0) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
return __ret;
}
#else
__ai uint32x2_t vclez_f32(float32x2_t __p0) {
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vclez_s32(int32x2_t __p0) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
return __ret;
}
#else
__ai uint32x2_t vclez_s32(int32x2_t __p0) {
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vclez_s64(int64x1_t __p0) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
return __ret;
}
#else
__ai uint64x1_t vclez_s64(int64x1_t __p0) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vclez_s16(int16x4_t __p0) {
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
return __ret;
}
#else
__ai uint16x4_t vclez_s16(int16x4_t __p0) {
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64_t vclezd_s64(int64_t __p0) {
int64_t __ret;
__ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
return __ret;
}
#else
__ai int64_t vclezd_s64(int64_t __p0) {
int64_t __ret;
__ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64_t vclezd_f64(float64_t __p0) {
uint64_t __ret;
__ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
return __ret;
}
#else
__ai uint64_t vclezd_f64(float64_t __p0) {
uint64_t __ret;
__ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32_t vclezs_f32(float32_t __p0) {
uint32_t __ret;
__ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
return __ret;
}
#else
__ai uint32_t vclezs_f32(float32_t __p0) {
uint32_t __ret;
__ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __ret;
__ret = (uint64x2_t)(__p0 < __p1);
return __ret;
}
#else
__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint64x2_t __ret;
__ret = (uint64x2_t)(__rev0 < __rev1);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
uint64x2_t __ret;
__ret = (uint64x2_t)(__p0 < __p1);
return __ret;
}
#else
__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint64x2_t __ret;
__ret = (uint64x2_t)(__rev0 < __rev1);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
uint64x2_t __ret;
__ret = (uint64x2_t)(__p0 < __p1);
return __ret;
}
#else
__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint64x2_t __ret;
__ret = (uint64x2_t)(__rev0 < __rev1);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
uint64x1_t __ret;
__ret = (uint64x1_t)(__p0 < __p1);
return __ret;
}
#else
__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
uint64x1_t __ret;
__ret = (uint64x1_t)(__p0 < __p1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
uint64x1_t __ret;
__ret = (uint64x1_t)(__p0 < __p1);
return __ret;
}
#else
__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
uint64x1_t __ret;
__ret = (uint64x1_t)(__p0 < __p1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
uint64x1_t __ret;
__ret = (uint64x1_t)(__p0 < __p1);
return __ret;
}
#else
__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
uint64x1_t __ret;
__ret = (uint64x1_t)(__p0 < __p1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
uint64_t __ret;
__ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
return __ret;
}
#else
__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
uint64_t __ret;
__ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
int64_t __ret;
__ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
return __ret;
}
#else
__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
int64_t __ret;
__ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
uint64_t __ret;
__ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
return __ret;
}
#else
__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
uint64_t __ret;
__ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
uint32_t __ret;
__ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
return __ret;
}
#else
__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
uint32_t __ret;
__ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
uint8x16_t __ret;
__ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
return __ret;
}
#else
__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __ret;
__ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
return __ret;
}
#else
__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
return __ret;
}
#else
__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
return __ret;
}
#else
__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
return __ret;
}
#else
__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint64x2_t __ret;
__ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
return __ret;
}
#else
__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
return __ret;
}
#else
__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __ret;
__ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
return __ret;
}
#else
__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
return __ret;
}
#else
__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
return __ret;
}
#else
__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __ret;
__ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
return __ret;
}
#else
__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
uint64x1_t __ret;
__ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
return __ret;
}
#else
__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x4_t __ret;
__ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64_t vcltzd_s64(int64_t __p0) {
int64_t __ret;
__ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
return __ret;
}
#else
__ai int64_t vcltzd_s64(int64_t __p0) {
int64_t __ret;
__ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64_t vcltzd_f64(float64_t __p0) {
uint64_t __ret;
__ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
return __ret;
}
#else
__ai uint64_t vcltzd_f64(float64_t __p0) {
uint64_t __ret;
__ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32_t vcltzs_f32(float32_t __p0) {
uint32_t __ret;
__ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
return __ret;
}
#else
__ai uint32_t vcltzs_f32(float32_t __p0) {
uint32_t __ret;
__ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
return __ret;
}
#endif
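/* Note on the vcombine_* definitions that follow: lane 0 of the result comes
 * from __p0 and lane 1 from __p1; the big-endian variants re-reverse the
 * combined vector so the in-register element order matches the rest of this
 * header.  Illustrative use (not part of the generated header):
 *   float64x2_t v = vcombine_f64(vdup_n_f64(1.0), vdup_n_f64(2.0));
 *   // vgetq_lane_f64(v, 0) == 1.0, vgetq_lane_f64(v, 1) == 2.0
 */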
#ifdef __LITTLE_ENDIAN__
__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
poly64x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
return __ret;
}
#else
__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
poly64x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
float64x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
return __ret;
}
#else
__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
float64x2_t __ret;
__ret = __builtin_shufflevector(__p0, __p1, 0, 1);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
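/* The vcopyq_lane_*, vcopy_lane_*, vcopyq_laneq_* and vcopy_laneq_* macros
 * below copy lane __p3 of vector __p2 into lane __p1 of vector __p0 by
 * chaining the corresponding vget(q)_lane and vset(q)_lane intrinsics.  The
 * big-endian variants reverse the operands, call the __noswap_ helpers, and
 * reverse the result back, so the observable lane numbering is unchanged.
 */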
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_p8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
poly8x16_t __s0_12 = __p0_12; \
poly8x8_t __s2_12 = __p2_12; \
poly8x16_t __ret_12; \
__ret_12 = vsetq_lane_p8(vget_lane_p8(__s2_12, __p3_12), __s0_12, __p1_12); \
__ret_12; \
})
#else
#define vcopyq_lane_p8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
poly8x16_t __s0_13 = __p0_13; \
poly8x8_t __s2_13 = __p2_13; \
poly8x16_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __rev2_13; __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x16_t __ret_13; \
__ret_13 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_13, __p3_13), __rev0_13, __p1_13); \
__ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_13; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_p16(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
poly16x8_t __s0_14 = __p0_14; \
poly16x4_t __s2_14 = __p2_14; \
poly16x8_t __ret_14; \
__ret_14 = vsetq_lane_p16(vget_lane_p16(__s2_14, __p3_14), __s0_14, __p1_14); \
__ret_14; \
})
#else
#define vcopyq_lane_p16(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
poly16x8_t __s0_15 = __p0_15; \
poly16x4_t __s2_15 = __p2_15; \
poly16x8_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 7, 6, 5, 4, 3, 2, 1, 0); \
poly16x4_t __rev2_15; __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 3, 2, 1, 0); \
poly16x8_t __ret_15; \
__ret_15 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_15, __p3_15), __rev0_15, __p1_15); \
__ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_15; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_u8(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
uint8x16_t __s0_16 = __p0_16; \
uint8x8_t __s2_16 = __p2_16; \
uint8x16_t __ret_16; \
__ret_16 = vsetq_lane_u8(vget_lane_u8(__s2_16, __p3_16), __s0_16, __p1_16); \
__ret_16; \
})
#else
#define vcopyq_lane_u8(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
uint8x16_t __s0_17 = __p0_17; \
uint8x8_t __s2_17 = __p2_17; \
uint8x16_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __rev2_17; __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x16_t __ret_17; \
__ret_17 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_17, __p3_17), __rev0_17, __p1_17); \
__ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_17; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_u32(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
uint32x4_t __s0_18 = __p0_18; \
uint32x2_t __s2_18 = __p2_18; \
uint32x4_t __ret_18; \
__ret_18 = vsetq_lane_u32(vget_lane_u32(__s2_18, __p3_18), __s0_18, __p1_18); \
__ret_18; \
})
#else
#define vcopyq_lane_u32(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
uint32x4_t __s0_19 = __p0_19; \
uint32x2_t __s2_19 = __p2_19; \
uint32x4_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 3, 2, 1, 0); \
uint32x2_t __rev2_19; __rev2_19 = __builtin_shufflevector(__s2_19, __s2_19, 1, 0); \
uint32x4_t __ret_19; \
__ret_19 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_19, __p3_19), __rev0_19, __p1_19); \
__ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
__ret_19; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_u64(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
uint64x2_t __s0_20 = __p0_20; \
uint64x1_t __s2_20 = __p2_20; \
uint64x2_t __ret_20; \
__ret_20 = vsetq_lane_u64(vget_lane_u64(__s2_20, __p3_20), __s0_20, __p1_20); \
__ret_20; \
})
#else
#define vcopyq_lane_u64(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
uint64x2_t __s0_21 = __p0_21; \
uint64x1_t __s2_21 = __p2_21; \
uint64x2_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
uint64x2_t __ret_21; \
__ret_21 = __noswap_vsetq_lane_u64(__noswap_vget_lane_u64(__s2_21, __p3_21), __rev0_21, __p1_21); \
__ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \
__ret_21; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_u16(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
uint16x8_t __s0_22 = __p0_22; \
uint16x4_t __s2_22 = __p2_22; \
uint16x8_t __ret_22; \
__ret_22 = vsetq_lane_u16(vget_lane_u16(__s2_22, __p3_22), __s0_22, __p1_22); \
__ret_22; \
})
#else
#define vcopyq_lane_u16(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
uint16x8_t __s0_23 = __p0_23; \
uint16x4_t __s2_23 = __p2_23; \
uint16x8_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \
uint16x4_t __rev2_23; __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 3, 2, 1, 0); \
uint16x8_t __ret_23; \
__ret_23 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_23, __p3_23), __rev0_23, __p1_23); \
__ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_23; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_s8(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
int8x16_t __s0_24 = __p0_24; \
int8x8_t __s2_24 = __p2_24; \
int8x16_t __ret_24; \
__ret_24 = vsetq_lane_s8(vget_lane_s8(__s2_24, __p3_24), __s0_24, __p1_24); \
__ret_24; \
})
#else
#define vcopyq_lane_s8(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
int8x16_t __s0_25 = __p0_25; \
int8x8_t __s2_25 = __p2_25; \
int8x16_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __rev2_25; __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x16_t __ret_25; \
__ret_25 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_25, __p3_25), __rev0_25, __p1_25); \
__ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_25; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_f32(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
float32x4_t __s0_26 = __p0_26; \
float32x2_t __s2_26 = __p2_26; \
float32x4_t __ret_26; \
__ret_26 = vsetq_lane_f32(vget_lane_f32(__s2_26, __p3_26), __s0_26, __p1_26); \
__ret_26; \
})
#else
#define vcopyq_lane_f32(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
float32x4_t __s0_27 = __p0_27; \
float32x2_t __s2_27 = __p2_27; \
float32x4_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \
float32x2_t __rev2_27; __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 1, 0); \
float32x4_t __ret_27; \
__ret_27 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_27, __p3_27), __rev0_27, __p1_27); \
__ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); \
__ret_27; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_s32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
int32x4_t __s0_28 = __p0_28; \
int32x2_t __s2_28 = __p2_28; \
int32x4_t __ret_28; \
__ret_28 = vsetq_lane_s32(vget_lane_s32(__s2_28, __p3_28), __s0_28, __p1_28); \
__ret_28; \
})
#else
#define vcopyq_lane_s32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
int32x4_t __s0_29 = __p0_29; \
int32x2_t __s2_29 = __p2_29; \
int32x4_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \
int32x2_t __rev2_29; __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \
int32x4_t __ret_29; \
__ret_29 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_29, __p3_29), __rev0_29, __p1_29); \
__ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 3, 2, 1, 0); \
__ret_29; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_s64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
int64x2_t __s0_30 = __p0_30; \
int64x1_t __s2_30 = __p2_30; \
int64x2_t __ret_30; \
__ret_30 = vsetq_lane_s64(vget_lane_s64(__s2_30, __p3_30), __s0_30, __p1_30); \
__ret_30; \
})
#else
#define vcopyq_lane_s64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
int64x2_t __s0_31 = __p0_31; \
int64x1_t __s2_31 = __p2_31; \
int64x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \
int64x2_t __ret_31; \
__ret_31 = __noswap_vsetq_lane_s64(__noswap_vget_lane_s64(__s2_31, __p3_31), __rev0_31, __p1_31); \
__ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \
__ret_31; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopyq_lane_s16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
int16x8_t __s0_32 = __p0_32; \
int16x4_t __s2_32 = __p2_32; \
int16x8_t __ret_32; \
__ret_32 = vsetq_lane_s16(vget_lane_s16(__s2_32, __p3_32), __s0_32, __p1_32); \
__ret_32; \
})
#else
#define vcopyq_lane_s16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
int16x8_t __s0_33 = __p0_33; \
int16x4_t __s2_33 = __p2_33; \
int16x8_t __rev0_33; __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 7, 6, 5, 4, 3, 2, 1, 0); \
int16x4_t __rev2_33; __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \
int16x8_t __ret_33; \
__ret_33 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_33, __p3_33), __rev0_33, __p1_33); \
__ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_33; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_p8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
poly8x8_t __s0_34 = __p0_34; \
poly8x8_t __s2_34 = __p2_34; \
poly8x8_t __ret_34; \
__ret_34 = vset_lane_p8(vget_lane_p8(__s2_34, __p3_34), __s0_34, __p1_34); \
__ret_34; \
})
#else
#define vcopy_lane_p8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
poly8x8_t __s0_35 = __p0_35; \
poly8x8_t __s2_35 = __p2_35; \
poly8x8_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __rev2_35; __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \
poly8x8_t __ret_35; \
__ret_35 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_35, __p3_35), __rev0_35, __p1_35); \
__ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_35; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_p16(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
poly16x4_t __s0_36 = __p0_36; \
poly16x4_t __s2_36 = __p2_36; \
poly16x4_t __ret_36; \
__ret_36 = vset_lane_p16(vget_lane_p16(__s2_36, __p3_36), __s0_36, __p1_36); \
__ret_36; \
})
#else
#define vcopy_lane_p16(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
poly16x4_t __s0_37 = __p0_37; \
poly16x4_t __s2_37 = __p2_37; \
poly16x4_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 3, 2, 1, 0); \
poly16x4_t __rev2_37; __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 3, 2, 1, 0); \
poly16x4_t __ret_37; \
__ret_37 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_37, __p3_37), __rev0_37, __p1_37); \
__ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 3, 2, 1, 0); \
__ret_37; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_u8(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
uint8x8_t __s0_38 = __p0_38; \
uint8x8_t __s2_38 = __p2_38; \
uint8x8_t __ret_38; \
__ret_38 = vset_lane_u8(vget_lane_u8(__s2_38, __p3_38), __s0_38, __p1_38); \
__ret_38; \
})
#else
#define vcopy_lane_u8(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
uint8x8_t __s0_39 = __p0_39; \
uint8x8_t __s2_39 = __p2_39; \
uint8x8_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __rev2_39; __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 7, 6, 5, 4, 3, 2, 1, 0); \
uint8x8_t __ret_39; \
__ret_39 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_39, __p3_39), __rev0_39, __p1_39); \
__ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_39; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_u32(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
uint32x2_t __s0_40 = __p0_40; \
uint32x2_t __s2_40 = __p2_40; \
uint32x2_t __ret_40; \
__ret_40 = vset_lane_u32(vget_lane_u32(__s2_40, __p3_40), __s0_40, __p1_40); \
__ret_40; \
})
#else
#define vcopy_lane_u32(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
uint32x2_t __s0_41 = __p0_41; \
uint32x2_t __s2_41 = __p2_41; \
uint32x2_t __rev0_41; __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 1, 0); \
uint32x2_t __rev2_41; __rev2_41 = __builtin_shufflevector(__s2_41, __s2_41, 1, 0); \
uint32x2_t __ret_41; \
__ret_41 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_41, __p3_41), __rev0_41, __p1_41); \
__ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 1, 0); \
__ret_41; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_u64(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
uint64x1_t __s0_42 = __p0_42; \
uint64x1_t __s2_42 = __p2_42; \
uint64x1_t __ret_42; \
__ret_42 = vset_lane_u64(vget_lane_u64(__s2_42, __p3_42), __s0_42, __p1_42); \
__ret_42; \
})
#else
#define vcopy_lane_u64(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
uint64x1_t __s0_43 = __p0_43; \
uint64x1_t __s2_43 = __p2_43; \
uint64x1_t __ret_43; \
__ret_43 = __noswap_vset_lane_u64(__noswap_vget_lane_u64(__s2_43, __p3_43), __s0_43, __p1_43); \
__ret_43; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
uint16x4_t __s0_44 = __p0_44; \
uint16x4_t __s2_44 = __p2_44; \
uint16x4_t __ret_44; \
__ret_44 = vset_lane_u16(vget_lane_u16(__s2_44, __p3_44), __s0_44, __p1_44); \
__ret_44; \
})
#else
#define vcopy_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
uint16x4_t __s0_45 = __p0_45; \
uint16x4_t __s2_45 = __p2_45; \
uint16x4_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 3, 2, 1, 0); \
uint16x4_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
uint16x4_t __ret_45; \
__ret_45 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_45, __p3_45), __rev0_45, __p1_45); \
__ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 3, 2, 1, 0); \
__ret_45; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_s8(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
int8x8_t __s0_46 = __p0_46; \
int8x8_t __s2_46 = __p2_46; \
int8x8_t __ret_46; \
__ret_46 = vset_lane_s8(vget_lane_s8(__s2_46, __p3_46), __s0_46, __p1_46); \
__ret_46; \
})
#else
#define vcopy_lane_s8(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
int8x8_t __s0_47 = __p0_47; \
int8x8_t __s2_47 = __p2_47; \
int8x8_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 7, 6, 5, 4, 3, 2, 1, 0); \
int8x8_t __ret_47; \
__ret_47 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_47, __p3_47), __rev0_47, __p1_47); \
__ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 7, 6, 5, 4, 3, 2, 1, 0); \
__ret_47; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_f32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
float32x2_t __s0_48 = __p0_48; \
float32x2_t __s2_48 = __p2_48; \
float32x2_t __ret_48; \
__ret_48 = vset_lane_f32(vget_lane_f32(__s2_48, __p3_48), __s0_48, __p1_48); \
__ret_48; \
})
#else
#define vcopy_lane_f32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
float32x2_t __s0_49 = __p0_49; \
float32x2_t __s2_49 = __p2_49; \
float32x2_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 1, 0); \
float32x2_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
float32x2_t __ret_49; \
__ret_49 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_49, __p3_49), __rev0_49, __p1_49); \
__ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 1, 0); \
__ret_49; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_s32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
int32x2_t __s0_50 = __p0_50; \
int32x2_t __s2_50 = __p2_50; \
int32x2_t __ret_50; \
__ret_50 = vset_lane_s32(vget_lane_s32(__s2_50, __p3_50), __s0_50, __p1_50); \
__ret_50; \
})
#else
#define vcopy_lane_s32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
int32x2_t __s0_51 = __p0_51; \
int32x2_t __s2_51 = __p2_51; \
int32x2_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 1, 0); \
int32x2_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 1, 0); \
int32x2_t __ret_51; \
__ret_51 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_51, __p3_51), __rev0_51, __p1_51); \
__ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 1, 0); \
__ret_51; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_s64(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
int64x1_t __s0_52 = __p0_52; \
int64x1_t __s2_52 = __p2_52; \
int64x1_t __ret_52; \
__ret_52 = vset_lane_s64(vget_lane_s64(__s2_52, __p3_52), __s0_52, __p1_52); \
__ret_52; \
})
#else
#define vcopy_lane_s64(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
int64x1_t __s0_53 = __p0_53; \
int64x1_t __s2_53 = __p2_53; \
int64x1_t __ret_53; \
__ret_53 = __noswap_vset_lane_s64(__noswap_vget_lane_s64(__s2_53, __p3_53), __s0_53, __p1_53); \
__ret_53; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vcopy_lane_s16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
int16x4_t __s0_54 = __p0_54; \
int16x4_t __s2_54 = __p2_54; \
int16x4_t __ret_54; \
__ret_54 = vset_lane_s16(vget_lane_s16(__s2_54, __p3_54), __s0_54, __p1_54); \
__ret_54; \
})
#else
#define vcopy_lane_s16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
int16x4_t __s0_55 = __p0_55; \
int16x4_t __s2_55 = __p2_55; \
int16x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
int16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
int16x4_t __ret_55; \
__ret_55 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_55, __p3_55), __rev0_55, __p1_55); \
__ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
__ret_55; \
})
#endif
  45017. #ifdef __LITTLE_ENDIAN__
  45018. #define vcopyq_laneq_p8(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
  45019. poly8x16_t __s0_56 = __p0_56; \
  45020. poly8x16_t __s2_56 = __p2_56; \
  45021. poly8x16_t __ret_56; \
  45022. __ret_56 = vsetq_lane_p8(vgetq_lane_p8(__s2_56, __p3_56), __s0_56, __p1_56); \
  45023. __ret_56; \
  45024. })
  45025. #else
  45026. #define vcopyq_laneq_p8(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
  45027. poly8x16_t __s0_57 = __p0_57; \
  45028. poly8x16_t __s2_57 = __p2_57; \
  45029. poly8x16_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45030. poly8x16_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45031. poly8x16_t __ret_57; \
  45032. __ret_57 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_57, __p3_57), __rev0_57, __p1_57); \
  45033. __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45034. __ret_57; \
  45035. })
  45036. #endif
  45037. #ifdef __LITTLE_ENDIAN__
  45038. #define vcopyq_laneq_p16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
  45039. poly16x8_t __s0_58 = __p0_58; \
  45040. poly16x8_t __s2_58 = __p2_58; \
  45041. poly16x8_t __ret_58; \
  45042. __ret_58 = vsetq_lane_p16(vgetq_lane_p16(__s2_58, __p3_58), __s0_58, __p1_58); \
  45043. __ret_58; \
  45044. })
  45045. #else
  45046. #define vcopyq_laneq_p16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
  45047. poly16x8_t __s0_59 = __p0_59; \
  45048. poly16x8_t __s2_59 = __p2_59; \
  45049. poly16x8_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 7, 6, 5, 4, 3, 2, 1, 0); \
  45050. poly16x8_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 7, 6, 5, 4, 3, 2, 1, 0); \
  45051. poly16x8_t __ret_59; \
  45052. __ret_59 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_59, __p3_59), __rev0_59, __p1_59); \
  45053. __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 7, 6, 5, 4, 3, 2, 1, 0); \
  45054. __ret_59; \
  45055. })
  45056. #endif
  45057. #ifdef __LITTLE_ENDIAN__
  45058. #define vcopyq_laneq_u8(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
  45059. uint8x16_t __s0_60 = __p0_60; \
  45060. uint8x16_t __s2_60 = __p2_60; \
  45061. uint8x16_t __ret_60; \
  45062. __ret_60 = vsetq_lane_u8(vgetq_lane_u8(__s2_60, __p3_60), __s0_60, __p1_60); \
  45063. __ret_60; \
  45064. })
  45065. #else
  45066. #define vcopyq_laneq_u8(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
  45067. uint8x16_t __s0_61 = __p0_61; \
  45068. uint8x16_t __s2_61 = __p2_61; \
  45069. uint8x16_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45070. uint8x16_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45071. uint8x16_t __ret_61; \
  45072. __ret_61 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_61, __p3_61), __rev0_61, __p1_61); \
  45073. __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45074. __ret_61; \
  45075. })
  45076. #endif
  45077. #ifdef __LITTLE_ENDIAN__
  45078. #define vcopyq_laneq_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
  45079. uint32x4_t __s0_62 = __p0_62; \
  45080. uint32x4_t __s2_62 = __p2_62; \
  45081. uint32x4_t __ret_62; \
  45082. __ret_62 = vsetq_lane_u32(vgetq_lane_u32(__s2_62, __p3_62), __s0_62, __p1_62); \
  45083. __ret_62; \
  45084. })
  45085. #else
  45086. #define vcopyq_laneq_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
  45087. uint32x4_t __s0_63 = __p0_63; \
  45088. uint32x4_t __s2_63 = __p2_63; \
  45089. uint32x4_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
  45090. uint32x4_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 3, 2, 1, 0); \
  45091. uint32x4_t __ret_63; \
  45092. __ret_63 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_63, __p3_63), __rev0_63, __p1_63); \
  45093. __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
  45094. __ret_63; \
  45095. })
  45096. #endif
  45097. #ifdef __LITTLE_ENDIAN__
  45098. #define vcopyq_laneq_u64(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
  45099. uint64x2_t __s0_64 = __p0_64; \
  45100. uint64x2_t __s2_64 = __p2_64; \
  45101. uint64x2_t __ret_64; \
  45102. __ret_64 = vsetq_lane_u64(vgetq_lane_u64(__s2_64, __p3_64), __s0_64, __p1_64); \
  45103. __ret_64; \
  45104. })
  45105. #else
  45106. #define vcopyq_laneq_u64(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
  45107. uint64x2_t __s0_65 = __p0_65; \
  45108. uint64x2_t __s2_65 = __p2_65; \
  45109. uint64x2_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 1, 0); \
  45110. uint64x2_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 1, 0); \
  45111. uint64x2_t __ret_65; \
  45112. __ret_65 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_65, __p3_65), __rev0_65, __p1_65); \
  45113. __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 1, 0); \
  45114. __ret_65; \
  45115. })
  45116. #endif
  45117. #ifdef __LITTLE_ENDIAN__
  45118. #define vcopyq_laneq_u16(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
  45119. uint16x8_t __s0_66 = __p0_66; \
  45120. uint16x8_t __s2_66 = __p2_66; \
  45121. uint16x8_t __ret_66; \
  45122. __ret_66 = vsetq_lane_u16(vgetq_lane_u16(__s2_66, __p3_66), __s0_66, __p1_66); \
  45123. __ret_66; \
  45124. })
  45125. #else
  45126. #define vcopyq_laneq_u16(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
  45127. uint16x8_t __s0_67 = __p0_67; \
  45128. uint16x8_t __s2_67 = __p2_67; \
  45129. uint16x8_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 7, 6, 5, 4, 3, 2, 1, 0); \
  45130. uint16x8_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 7, 6, 5, 4, 3, 2, 1, 0); \
  45131. uint16x8_t __ret_67; \
  45132. __ret_67 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_67, __p3_67), __rev0_67, __p1_67); \
  45133. __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 7, 6, 5, 4, 3, 2, 1, 0); \
  45134. __ret_67; \
  45135. })
  45136. #endif
  45137. #ifdef __LITTLE_ENDIAN__
  45138. #define vcopyq_laneq_s8(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
  45139. int8x16_t __s0_68 = __p0_68; \
  45140. int8x16_t __s2_68 = __p2_68; \
  45141. int8x16_t __ret_68; \
  45142. __ret_68 = vsetq_lane_s8(vgetq_lane_s8(__s2_68, __p3_68), __s0_68, __p1_68); \
  45143. __ret_68; \
  45144. })
  45145. #else
  45146. #define vcopyq_laneq_s8(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
  45147. int8x16_t __s0_69 = __p0_69; \
  45148. int8x16_t __s2_69 = __p2_69; \
  45149. int8x16_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45150. int8x16_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45151. int8x16_t __ret_69; \
  45152. __ret_69 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_69, __p3_69), __rev0_69, __p1_69); \
  45153. __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45154. __ret_69; \
  45155. })
  45156. #endif
  45157. #ifdef __LITTLE_ENDIAN__
  45158. #define vcopyq_laneq_f32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
  45159. float32x4_t __s0_70 = __p0_70; \
  45160. float32x4_t __s2_70 = __p2_70; \
  45161. float32x4_t __ret_70; \
  45162. __ret_70 = vsetq_lane_f32(vgetq_lane_f32(__s2_70, __p3_70), __s0_70, __p1_70); \
  45163. __ret_70; \
  45164. })
  45165. #else
  45166. #define vcopyq_laneq_f32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
  45167. float32x4_t __s0_71 = __p0_71; \
  45168. float32x4_t __s2_71 = __p2_71; \
  45169. float32x4_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \
  45170. float32x4_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
  45171. float32x4_t __ret_71; \
  45172. __ret_71 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_71, __p3_71), __rev0_71, __p1_71); \
  45173. __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \
  45174. __ret_71; \
  45175. })
  45176. #endif
  45177. #ifdef __LITTLE_ENDIAN__
  45178. #define vcopyq_laneq_s32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
  45179. int32x4_t __s0_72 = __p0_72; \
  45180. int32x4_t __s2_72 = __p2_72; \
  45181. int32x4_t __ret_72; \
  45182. __ret_72 = vsetq_lane_s32(vgetq_lane_s32(__s2_72, __p3_72), __s0_72, __p1_72); \
  45183. __ret_72; \
  45184. })
  45185. #else
  45186. #define vcopyq_laneq_s32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
  45187. int32x4_t __s0_73 = __p0_73; \
  45188. int32x4_t __s2_73 = __p2_73; \
  45189. int32x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \
  45190. int32x4_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 3, 2, 1, 0); \
  45191. int32x4_t __ret_73; \
  45192. __ret_73 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_73, __p3_73), __rev0_73, __p1_73); \
  45193. __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \
  45194. __ret_73; \
  45195. })
  45196. #endif
  45197. #ifdef __LITTLE_ENDIAN__
  45198. #define vcopyq_laneq_s64(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
  45199. int64x2_t __s0_74 = __p0_74; \
  45200. int64x2_t __s2_74 = __p2_74; \
  45201. int64x2_t __ret_74; \
  45202. __ret_74 = vsetq_lane_s64(vgetq_lane_s64(__s2_74, __p3_74), __s0_74, __p1_74); \
  45203. __ret_74; \
  45204. })
  45205. #else
  45206. #define vcopyq_laneq_s64(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
  45207. int64x2_t __s0_75 = __p0_75; \
  45208. int64x2_t __s2_75 = __p2_75; \
  45209. int64x2_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 1, 0); \
  45210. int64x2_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 1, 0); \
  45211. int64x2_t __ret_75; \
  45212. __ret_75 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_75, __p3_75), __rev0_75, __p1_75); \
  45213. __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 1, 0); \
  45214. __ret_75; \
  45215. })
  45216. #endif
  45217. #ifdef __LITTLE_ENDIAN__
  45218. #define vcopyq_laneq_s16(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
  45219. int16x8_t __s0_76 = __p0_76; \
  45220. int16x8_t __s2_76 = __p2_76; \
  45221. int16x8_t __ret_76; \
  45222. __ret_76 = vsetq_lane_s16(vgetq_lane_s16(__s2_76, __p3_76), __s0_76, __p1_76); \
  45223. __ret_76; \
  45224. })
  45225. #else
  45226. #define vcopyq_laneq_s16(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
  45227. int16x8_t __s0_77 = __p0_77; \
  45228. int16x8_t __s2_77 = __p2_77; \
  45229. int16x8_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \
  45230. int16x8_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 7, 6, 5, 4, 3, 2, 1, 0); \
  45231. int16x8_t __ret_77; \
  45232. __ret_77 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_77, __p3_77), __rev0_77, __p1_77); \
  45233. __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \
  45234. __ret_77; \
  45235. })
  45236. #endif
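/* Illustrative usage sketch (editor annotation, not part of the original header).
 * The vcopyq_laneq_* family copies one lane of a 128-bit source vector into a
 * chosen lane of a 128-bit destination, e.g. on an AArch64 target with
 * <arm_neon.h> included:
 *
 *   float32x4_t a = vdupq_n_f32(1.0f);
 *   float32x4_t b = vdupq_n_f32(2.0f);
 *   a = vcopyq_laneq_f32(a, 0, b, 3);   // a[0] = b[3]; other lanes of a unchanged
 */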
  45237. #ifdef __LITTLE_ENDIAN__
  45238. #define vcopy_laneq_p8(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
  45239. poly8x8_t __s0_78 = __p0_78; \
  45240. poly8x16_t __s2_78 = __p2_78; \
  45241. poly8x8_t __ret_78; \
  45242. __ret_78 = vset_lane_p8(vgetq_lane_p8(__s2_78, __p3_78), __s0_78, __p1_78); \
  45243. __ret_78; \
  45244. })
  45245. #else
  45246. #define vcopy_laneq_p8(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
  45247. poly8x8_t __s0_79 = __p0_79; \
  45248. poly8x16_t __s2_79 = __p2_79; \
  45249. poly8x8_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 7, 6, 5, 4, 3, 2, 1, 0); \
  45250. poly8x16_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45251. poly8x8_t __ret_79; \
  45252. __ret_79 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_79, __p3_79), __rev0_79, __p1_79); \
  45253. __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 7, 6, 5, 4, 3, 2, 1, 0); \
  45254. __ret_79; \
  45255. })
  45256. #endif
  45257. #ifdef __LITTLE_ENDIAN__
  45258. #define vcopy_laneq_p16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
  45259. poly16x4_t __s0_80 = __p0_80; \
  45260. poly16x8_t __s2_80 = __p2_80; \
  45261. poly16x4_t __ret_80; \
  45262. __ret_80 = vset_lane_p16(vgetq_lane_p16(__s2_80, __p3_80), __s0_80, __p1_80); \
  45263. __ret_80; \
  45264. })
  45265. #else
  45266. #define vcopy_laneq_p16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
  45267. poly16x4_t __s0_81 = __p0_81; \
  45268. poly16x8_t __s2_81 = __p2_81; \
  45269. poly16x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
  45270. poly16x8_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 7, 6, 5, 4, 3, 2, 1, 0); \
  45271. poly16x4_t __ret_81; \
  45272. __ret_81 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_81, __p3_81), __rev0_81, __p1_81); \
  45273. __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
  45274. __ret_81; \
  45275. })
  45276. #endif
  45277. #ifdef __LITTLE_ENDIAN__
  45278. #define vcopy_laneq_u8(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
  45279. uint8x8_t __s0_82 = __p0_82; \
  45280. uint8x16_t __s2_82 = __p2_82; \
  45281. uint8x8_t __ret_82; \
  45282. __ret_82 = vset_lane_u8(vgetq_lane_u8(__s2_82, __p3_82), __s0_82, __p1_82); \
  45283. __ret_82; \
  45284. })
  45285. #else
  45286. #define vcopy_laneq_u8(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
  45287. uint8x8_t __s0_83 = __p0_83; \
  45288. uint8x16_t __s2_83 = __p2_83; \
  45289. uint8x8_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 7, 6, 5, 4, 3, 2, 1, 0); \
  45290. uint8x16_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45291. uint8x8_t __ret_83; \
  45292. __ret_83 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_83, __p3_83), __rev0_83, __p1_83); \
  45293. __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 7, 6, 5, 4, 3, 2, 1, 0); \
  45294. __ret_83; \
  45295. })
  45296. #endif
  45297. #ifdef __LITTLE_ENDIAN__
  45298. #define vcopy_laneq_u32(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
  45299. uint32x2_t __s0_84 = __p0_84; \
  45300. uint32x4_t __s2_84 = __p2_84; \
  45301. uint32x2_t __ret_84; \
  45302. __ret_84 = vset_lane_u32(vgetq_lane_u32(__s2_84, __p3_84), __s0_84, __p1_84); \
  45303. __ret_84; \
  45304. })
  45305. #else
  45306. #define vcopy_laneq_u32(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
  45307. uint32x2_t __s0_85 = __p0_85; \
  45308. uint32x4_t __s2_85 = __p2_85; \
  45309. uint32x2_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 1, 0); \
  45310. uint32x4_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 3, 2, 1, 0); \
  45311. uint32x2_t __ret_85; \
  45312. __ret_85 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_85, __p3_85), __rev0_85, __p1_85); \
  45313. __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 1, 0); \
  45314. __ret_85; \
  45315. })
  45316. #endif
  45317. #ifdef __LITTLE_ENDIAN__
  45318. #define vcopy_laneq_u64(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
  45319. uint64x1_t __s0_86 = __p0_86; \
  45320. uint64x2_t __s2_86 = __p2_86; \
  45321. uint64x1_t __ret_86; \
  45322. __ret_86 = vset_lane_u64(vgetq_lane_u64(__s2_86, __p3_86), __s0_86, __p1_86); \
  45323. __ret_86; \
  45324. })
  45325. #else
  45326. #define vcopy_laneq_u64(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
  45327. uint64x1_t __s0_87 = __p0_87; \
  45328. uint64x2_t __s2_87 = __p2_87; \
  45329. uint64x2_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 1, 0); \
  45330. uint64x1_t __ret_87; \
  45331. __ret_87 = __noswap_vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_87, __p3_87), __s0_87, __p1_87); \
  45332. __ret_87; \
  45333. })
  45334. #endif
  45335. #ifdef __LITTLE_ENDIAN__
  45336. #define vcopy_laneq_u16(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
  45337. uint16x4_t __s0_88 = __p0_88; \
  45338. uint16x8_t __s2_88 = __p2_88; \
  45339. uint16x4_t __ret_88; \
  45340. __ret_88 = vset_lane_u16(vgetq_lane_u16(__s2_88, __p3_88), __s0_88, __p1_88); \
  45341. __ret_88; \
  45342. })
  45343. #else
  45344. #define vcopy_laneq_u16(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
  45345. uint16x4_t __s0_89 = __p0_89; \
  45346. uint16x8_t __s2_89 = __p2_89; \
  45347. uint16x4_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
  45348. uint16x8_t __rev2_89; __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 7, 6, 5, 4, 3, 2, 1, 0); \
  45349. uint16x4_t __ret_89; \
  45350. __ret_89 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_89, __p3_89), __rev0_89, __p1_89); \
  45351. __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
  45352. __ret_89; \
  45353. })
  45354. #endif
  45355. #ifdef __LITTLE_ENDIAN__
  45356. #define vcopy_laneq_s8(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
  45357. int8x8_t __s0_90 = __p0_90; \
  45358. int8x16_t __s2_90 = __p2_90; \
  45359. int8x8_t __ret_90; \
  45360. __ret_90 = vset_lane_s8(vgetq_lane_s8(__s2_90, __p3_90), __s0_90, __p1_90); \
  45361. __ret_90; \
  45362. })
  45363. #else
  45364. #define vcopy_laneq_s8(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
  45365. int8x8_t __s0_91 = __p0_91; \
  45366. int8x16_t __s2_91 = __p2_91; \
  45367. int8x8_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
  45368. int8x16_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  45369. int8x8_t __ret_91; \
  45370. __ret_91 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_91, __p3_91), __rev0_91, __p1_91); \
  45371. __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
  45372. __ret_91; \
  45373. })
  45374. #endif
  45375. #ifdef __LITTLE_ENDIAN__
  45376. #define vcopy_laneq_f32(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
  45377. float32x2_t __s0_92 = __p0_92; \
  45378. float32x4_t __s2_92 = __p2_92; \
  45379. float32x2_t __ret_92; \
  45380. __ret_92 = vset_lane_f32(vgetq_lane_f32(__s2_92, __p3_92), __s0_92, __p1_92); \
  45381. __ret_92; \
  45382. })
  45383. #else
  45384. #define vcopy_laneq_f32(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
  45385. float32x2_t __s0_93 = __p0_93; \
  45386. float32x4_t __s2_93 = __p2_93; \
  45387. float32x2_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
  45388. float32x4_t __rev2_93; __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \
  45389. float32x2_t __ret_93; \
  45390. __ret_93 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_93, __p3_93), __rev0_93, __p1_93); \
  45391. __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
  45392. __ret_93; \
  45393. })
  45394. #endif
  45395. #ifdef __LITTLE_ENDIAN__
  45396. #define vcopy_laneq_s32(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
  45397. int32x2_t __s0_94 = __p0_94; \
  45398. int32x4_t __s2_94 = __p2_94; \
  45399. int32x2_t __ret_94; \
  45400. __ret_94 = vset_lane_s32(vgetq_lane_s32(__s2_94, __p3_94), __s0_94, __p1_94); \
  45401. __ret_94; \
  45402. })
  45403. #else
  45404. #define vcopy_laneq_s32(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
  45405. int32x2_t __s0_95 = __p0_95; \
  45406. int32x4_t __s2_95 = __p2_95; \
  45407. int32x2_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 1, 0); \
  45408. int32x4_t __rev2_95; __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 3, 2, 1, 0); \
  45409. int32x2_t __ret_95; \
  45410. __ret_95 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_95, __p3_95), __rev0_95, __p1_95); \
  45411. __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 1, 0); \
  45412. __ret_95; \
  45413. })
  45414. #endif
  45415. #ifdef __LITTLE_ENDIAN__
  45416. #define vcopy_laneq_s64(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
  45417. int64x1_t __s0_96 = __p0_96; \
  45418. int64x2_t __s2_96 = __p2_96; \
  45419. int64x1_t __ret_96; \
  45420. __ret_96 = vset_lane_s64(vgetq_lane_s64(__s2_96, __p3_96), __s0_96, __p1_96); \
  45421. __ret_96; \
  45422. })
  45423. #else
  45424. #define vcopy_laneq_s64(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
  45425. int64x1_t __s0_97 = __p0_97; \
  45426. int64x2_t __s2_97 = __p2_97; \
  45427. int64x2_t __rev2_97; __rev2_97 = __builtin_shufflevector(__s2_97, __s2_97, 1, 0); \
  45428. int64x1_t __ret_97; \
  45429. __ret_97 = __noswap_vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_97, __p3_97), __s0_97, __p1_97); \
  45430. __ret_97; \
  45431. })
  45432. #endif
  45433. #ifdef __LITTLE_ENDIAN__
  45434. #define vcopy_laneq_s16(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
  45435. int16x4_t __s0_98 = __p0_98; \
  45436. int16x8_t __s2_98 = __p2_98; \
  45437. int16x4_t __ret_98; \
  45438. __ret_98 = vset_lane_s16(vgetq_lane_s16(__s2_98, __p3_98), __s0_98, __p1_98); \
  45439. __ret_98; \
  45440. })
  45441. #else
  45442. #define vcopy_laneq_s16(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
  45443. int16x4_t __s0_99 = __p0_99; \
  45444. int16x8_t __s2_99 = __p2_99; \
  45445. int16x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \
  45446. int16x8_t __rev2_99; __rev2_99 = __builtin_shufflevector(__s2_99, __s2_99, 7, 6, 5, 4, 3, 2, 1, 0); \
  45447. int16x4_t __ret_99; \
  45448. __ret_99 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_99, __p3_99), __rev0_99, __p1_99); \
  45449. __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \
  45450. __ret_99; \
  45451. })
  45452. #endif
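/* Illustrative usage sketch (editor annotation). The vcopy_laneq_* variants take a
 * 64-bit destination and a 128-bit source:
 *
 *   uint32x2_t d = vdup_n_u32(0);
 *   uint32x4_t s = vdupq_n_u32(7);
 *   d = vcopy_laneq_u32(d, 1, s, 2);    // d[1] = s[2]
 */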
  45453. #ifdef __LITTLE_ENDIAN__
  45454. __ai poly64x1_t vcreate_p64(uint64_t __p0) {
  45455. poly64x1_t __ret;
  45456. __ret = (poly64x1_t)(__p0);
  45457. return __ret;
  45458. }
  45459. #else
  45460. __ai poly64x1_t vcreate_p64(uint64_t __p0) {
  45461. poly64x1_t __ret;
  45462. __ret = (poly64x1_t)(__p0);
  45463. return __ret;
  45464. }
  45465. #endif
  45466. #ifdef __LITTLE_ENDIAN__
  45467. __ai float64x1_t vcreate_f64(uint64_t __p0) {
  45468. float64x1_t __ret;
  45469. __ret = (float64x1_t)(__p0);
  45470. return __ret;
  45471. }
  45472. #else
  45473. __ai float64x1_t vcreate_f64(uint64_t __p0) {
  45474. float64x1_t __ret;
  45475. __ret = (float64x1_t)(__p0);
  45476. return __ret;
  45477. }
  45478. #endif
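/* Illustrative usage sketch (editor annotation). vcreate_p64/vcreate_f64 reinterpret
 * a 64-bit integer as a one-element vector, so passing the IEEE-754 bit pattern of
 * 1.0 yields a float64x1_t holding 1.0:
 *
 *   float64x1_t one = vcreate_f64(0x3ff0000000000000ULL);
 */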
  45479. #ifdef __LITTLE_ENDIAN__
  45480. __ai float32_t vcvts_f32_s32(int32_t __p0) {
  45481. float32_t __ret;
  45482. __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
  45483. return __ret;
  45484. }
  45485. #else
  45486. __ai float32_t vcvts_f32_s32(int32_t __p0) {
  45487. float32_t __ret;
  45488. __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
  45489. return __ret;
  45490. }
  45491. #endif
  45492. #ifdef __LITTLE_ENDIAN__
  45493. __ai float32_t vcvts_f32_u32(uint32_t __p0) {
  45494. float32_t __ret;
  45495. __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
  45496. return __ret;
  45497. }
  45498. #else
  45499. __ai float32_t vcvts_f32_u32(uint32_t __p0) {
  45500. float32_t __ret;
  45501. __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
  45502. return __ret;
  45503. }
  45504. #endif
  45505. #ifdef __LITTLE_ENDIAN__
  45506. __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
  45507. float32x2_t __ret;
  45508. __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
  45509. return __ret;
  45510. }
  45511. #else
  45512. __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
  45513. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  45514. float32x2_t __ret;
  45515. __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
  45516. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  45517. return __ret;
  45518. }
  45519. __ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
  45520. float32x2_t __ret;
  45521. __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
  45522. return __ret;
  45523. }
  45524. #endif
  45525. #ifdef __LITTLE_ENDIAN__
  45526. __ai float64_t vcvtd_f64_s64(int64_t __p0) {
  45527. float64_t __ret;
  45528. __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
  45529. return __ret;
  45530. }
  45531. #else
  45532. __ai float64_t vcvtd_f64_s64(int64_t __p0) {
  45533. float64_t __ret;
  45534. __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
  45535. return __ret;
  45536. }
  45537. #endif
  45538. #ifdef __LITTLE_ENDIAN__
  45539. __ai float64_t vcvtd_f64_u64(uint64_t __p0) {
  45540. float64_t __ret;
  45541. __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
  45542. return __ret;
  45543. }
  45544. #else
  45545. __ai float64_t vcvtd_f64_u64(uint64_t __p0) {
  45546. float64_t __ret;
  45547. __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
  45548. return __ret;
  45549. }
  45550. #endif
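/* Illustrative usage sketch (editor annotation). The scalar converts map a single
 * integer to floating point:
 *
 *   float32_t f = vcvts_f32_s32(-3);      // -3.0f
 *   float64_t d = vcvtd_f64_u64(10ULL);   // 10.0
 */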
  45551. #ifdef __LITTLE_ENDIAN__
  45552. __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
  45553. float64x2_t __ret;
  45554. __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
  45555. return __ret;
  45556. }
  45557. #else
  45558. __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
  45559. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  45560. float64x2_t __ret;
  45561. __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
  45562. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  45563. return __ret;
  45564. }
  45565. #endif
  45566. #ifdef __LITTLE_ENDIAN__
  45567. __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
  45568. float64x2_t __ret;
  45569. __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
  45570. return __ret;
  45571. }
  45572. #else
  45573. __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
  45574. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  45575. float64x2_t __ret;
  45576. __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
  45577. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  45578. return __ret;
  45579. }
  45580. #endif
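/* Illustrative usage sketch (editor annotation). The vector forms convert each
 * 64-bit integer lane to double precision:
 *
 *   float64x2_t du = vcvtq_f64_u64(vdupq_n_u64(5));    // {5.0, 5.0}
 *   float64x2_t ds = vcvtq_f64_s64(vdupq_n_s64(-3));   // {-3.0, -3.0}
 */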
  45581. #ifdef __LITTLE_ENDIAN__
  45582. __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
  45583. float64x1_t __ret;
  45584. __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
  45585. return __ret;
  45586. }
  45587. #else
  45588. __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
  45589. float64x1_t __ret;
  45590. __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
  45591. return __ret;
  45592. }
  45593. #endif
  45594. #ifdef __LITTLE_ENDIAN__
  45595. __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
  45596. float64x1_t __ret;
  45597. __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
  45598. return __ret;
  45599. }
  45600. #else
  45601. __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
  45602. float64x1_t __ret;
  45603. __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
  45604. return __ret;
  45605. }
  45606. #endif
  45607. #ifdef __LITTLE_ENDIAN__
  45608. __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
  45609. float64x2_t __ret;
  45610. __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
  45611. return __ret;
  45612. }
  45613. #else
  45614. __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
  45615. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  45616. float64x2_t __ret;
  45617. __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
  45618. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  45619. return __ret;
  45620. }
  45621. __ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
  45622. float64x2_t __ret;
  45623. __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
  45624. return __ret;
  45625. }
  45626. #endif
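/* Illustrative usage sketch (editor annotation). vcvt_f32_f64 narrows a float64x2_t
 * to float32x2_t, and vcvt_f64_f32 widens back:
 *
 *   float64x2_t dv = vdupq_n_f64(1.5);
 *   float32x2_t nv = vcvt_f32_f64(dv);   // {1.5f, 1.5f}
 *   float64x2_t wv = vcvt_f64_f32(nv);   // {1.5, 1.5}
 */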
  45627. #ifdef __LITTLE_ENDIAN__
  45628. __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
  45629. float16x8_t __ret;
  45630. __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
  45631. return __ret;
  45632. }
  45633. #else
  45634. __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
  45635. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  45636. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  45637. float16x8_t __ret;
  45638. __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
  45639. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  45640. return __ret;
  45641. }
  45642. #endif
  45643. #ifdef __LITTLE_ENDIAN__
  45644. __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
  45645. float32x4_t __ret;
  45646. __ret = vcvt_f32_f16(vget_high_f16(__p0));
  45647. return __ret;
  45648. }
  45649. #else
  45650. __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
  45651. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  45652. float32x4_t __ret;
  45653. __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
  45654. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  45655. return __ret;
  45656. }
  45657. #endif
  45658. #ifdef __LITTLE_ENDIAN__
  45659. __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
  45660. float32x4_t __ret;
  45661. __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
  45662. return __ret;
  45663. }
  45664. #else
  45665. __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
  45666. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  45667. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  45668. float32x4_t __ret;
  45669. __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
  45670. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  45671. return __ret;
  45672. }
  45673. #endif
  45674. #ifdef __LITTLE_ENDIAN__
  45675. __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
  45676. float64x2_t __ret;
  45677. __ret = vcvt_f64_f32(vget_high_f32(__p0));
  45678. return __ret;
  45679. }
  45680. #else
  45681. __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
  45682. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  45683. float64x2_t __ret;
  45684. __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
  45685. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  45686. return __ret;
  45687. }
  45688. #endif
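/* Illustrative usage sketch (editor annotation). The *_high_* converts operate on,
 * or fill, the upper half of a 128-bit vector:
 *
 *   float32x4_t s  = vdupq_n_f32(2.0f);
 *   float64x2_t hi = vcvt_high_f64_f32(s);                    // converts lanes 2..3 of s
 *   float32x4_t p  = vcvt_high_f32_f64(vget_low_f32(s), hi);  // low half kept, high half narrowed
 */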
  45689. #ifdef __LITTLE_ENDIAN__
  45690. #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
  45691. uint32_t __s0 = __p0; \
  45692. float32_t __ret; \
  45693. __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
  45694. __ret; \
  45695. })
  45696. #else
  45697. #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
  45698. uint32_t __s0 = __p0; \
  45699. float32_t __ret; \
  45700. __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
  45701. __ret; \
  45702. })
  45703. #endif
  45704. #ifdef __LITTLE_ENDIAN__
  45705. #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
  45706. int32_t __s0 = __p0; \
  45707. float32_t __ret; \
  45708. __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
  45709. __ret; \
  45710. })
  45711. #else
  45712. #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
  45713. int32_t __s0 = __p0; \
  45714. float32_t __ret; \
  45715. __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
  45716. __ret; \
  45717. })
  45718. #endif
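/* Illustrative usage sketch (editor annotation). The _n_ forms treat the integer as
 * fixed point with the given number of fractional bits, i.e. they divide by 2^n:
 *
 *   float32_t x = vcvts_n_f32_s32(256, 8);   // 256 / 2^8 = 1.0f
 */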
  45719. #ifdef __LITTLE_ENDIAN__
  45720. #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
  45721. uint64x2_t __s0 = __p0; \
  45722. float64x2_t __ret; \
  45723. __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
  45724. __ret; \
  45725. })
  45726. #else
  45727. #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
  45728. uint64x2_t __s0 = __p0; \
  45729. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  45730. float64x2_t __ret; \
  45731. __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
  45732. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  45733. __ret; \
  45734. })
  45735. #endif
  45736. #ifdef __LITTLE_ENDIAN__
  45737. #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
  45738. int64x2_t __s0 = __p0; \
  45739. float64x2_t __ret; \
  45740. __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
  45741. __ret; \
  45742. })
  45743. #else
  45744. #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
  45745. int64x2_t __s0 = __p0; \
  45746. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  45747. float64x2_t __ret; \
  45748. __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
  45749. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  45750. __ret; \
  45751. })
  45752. #endif
  45753. #ifdef __LITTLE_ENDIAN__
  45754. #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
  45755. uint64x1_t __s0 = __p0; \
  45756. float64x1_t __ret; \
  45757. __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
  45758. __ret; \
  45759. })
  45760. #else
  45761. #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
  45762. uint64x1_t __s0 = __p0; \
  45763. float64x1_t __ret; \
  45764. __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
  45765. __ret; \
  45766. })
  45767. #endif
  45768. #ifdef __LITTLE_ENDIAN__
  45769. #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
  45770. int64x1_t __s0 = __p0; \
  45771. float64x1_t __ret; \
  45772. __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
  45773. __ret; \
  45774. })
  45775. #else
  45776. #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
  45777. int64x1_t __s0 = __p0; \
  45778. float64x1_t __ret; \
  45779. __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
  45780. __ret; \
  45781. })
  45782. #endif
  45783. #ifdef __LITTLE_ENDIAN__
  45784. #define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
  45785. uint64_t __s0 = __p0; \
  45786. float64_t __ret; \
  45787. __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
  45788. __ret; \
  45789. })
  45790. #else
  45791. #define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
  45792. uint64_t __s0 = __p0; \
  45793. float64_t __ret; \
  45794. __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
  45795. __ret; \
  45796. })
  45797. #endif
  45798. #ifdef __LITTLE_ENDIAN__
  45799. #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
  45800. int64_t __s0 = __p0; \
  45801. float64_t __ret; \
  45802. __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
  45803. __ret; \
  45804. })
  45805. #else
  45806. #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
  45807. int64_t __s0 = __p0; \
  45808. float64_t __ret; \
  45809. __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
  45810. __ret; \
  45811. })
  45812. #endif
  45813. #ifdef __LITTLE_ENDIAN__
  45814. #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
  45815. float32_t __s0 = __p0; \
  45816. int32_t __ret; \
  45817. __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
  45818. __ret; \
  45819. })
  45820. #else
  45821. #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
  45822. float32_t __s0 = __p0; \
  45823. int32_t __ret; \
  45824. __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
  45825. __ret; \
  45826. })
  45827. #endif
  45828. #ifdef __LITTLE_ENDIAN__
  45829. #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
  45830. float64x2_t __s0 = __p0; \
  45831. int64x2_t __ret; \
  45832. __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
  45833. __ret; \
  45834. })
  45835. #else
  45836. #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
  45837. float64x2_t __s0 = __p0; \
  45838. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  45839. int64x2_t __ret; \
  45840. __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
  45841. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  45842. __ret; \
  45843. })
  45844. #endif
  45845. #ifdef __LITTLE_ENDIAN__
  45846. #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
  45847. float64x1_t __s0 = __p0; \
  45848. int64x1_t __ret; \
  45849. __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
  45850. __ret; \
  45851. })
  45852. #else
  45853. #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
  45854. float64x1_t __s0 = __p0; \
  45855. int64x1_t __ret; \
  45856. __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
  45857. __ret; \
  45858. })
  45859. #endif
  45860. #ifdef __LITTLE_ENDIAN__
  45861. #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
  45862. float64_t __s0 = __p0; \
  45863. int64_t __ret; \
  45864. __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
  45865. __ret; \
  45866. })
  45867. #else
  45868. #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
  45869. float64_t __s0 = __p0; \
  45870. int64_t __ret; \
  45871. __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
  45872. __ret; \
  45873. })
  45874. #endif
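/* Illustrative usage sketch (editor annotation). In the opposite direction the _n_
 * forms scale by 2^n before the saturating, round-toward-zero integer convert:
 *
 *   int64_t q = vcvtd_n_s64_f64(1.5, 4);     // 1.5 * 2^4 = 24
 */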
  45875. #ifdef __LITTLE_ENDIAN__
  45876. #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
  45877. float32_t __s0 = __p0; \
  45878. uint32_t __ret; \
  45879. __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
  45880. __ret; \
  45881. })
  45882. #else
  45883. #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
  45884. float32_t __s0 = __p0; \
  45885. uint32_t __ret; \
  45886. __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
  45887. __ret; \
  45888. })
  45889. #endif
  45890. #ifdef __LITTLE_ENDIAN__
  45891. #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
  45892. float64x2_t __s0 = __p0; \
  45893. uint64x2_t __ret; \
  45894. __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
  45895. __ret; \
  45896. })
  45897. #else
  45898. #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
  45899. float64x2_t __s0 = __p0; \
  45900. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  45901. uint64x2_t __ret; \
  45902. __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
  45903. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  45904. __ret; \
  45905. })
  45906. #endif
  45907. #ifdef __LITTLE_ENDIAN__
  45908. #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
  45909. float64x1_t __s0 = __p0; \
  45910. uint64x1_t __ret; \
  45911. __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
  45912. __ret; \
  45913. })
  45914. #else
  45915. #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
  45916. float64x1_t __s0 = __p0; \
  45917. uint64x1_t __ret; \
  45918. __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
  45919. __ret; \
  45920. })
  45921. #endif
  45922. #ifdef __LITTLE_ENDIAN__
  45923. #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
  45924. float64_t __s0 = __p0; \
  45925. uint64_t __ret; \
  45926. __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
  45927. __ret; \
  45928. })
  45929. #else
  45930. #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
  45931. float64_t __s0 = __p0; \
  45932. uint64_t __ret; \
  45933. __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
  45934. __ret; \
  45935. })
  45936. #endif
  45937. #ifdef __LITTLE_ENDIAN__
  45938. __ai int32_t vcvts_s32_f32(float32_t __p0) {
  45939. int32_t __ret;
  45940. __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
  45941. return __ret;
  45942. }
  45943. #else
  45944. __ai int32_t vcvts_s32_f32(float32_t __p0) {
  45945. int32_t __ret;
  45946. __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
  45947. return __ret;
  45948. }
  45949. #endif
  45950. #ifdef __LITTLE_ENDIAN__
  45951. __ai int64_t vcvtd_s64_f64(float64_t __p0) {
  45952. int64_t __ret;
  45953. __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
  45954. return __ret;
  45955. }
  45956. #else
  45957. __ai int64_t vcvtd_s64_f64(float64_t __p0) {
  45958. int64_t __ret;
  45959. __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
  45960. return __ret;
  45961. }
  45962. #endif
  45963. #ifdef __LITTLE_ENDIAN__
  45964. __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
  45965. int64x2_t __ret;
  45966. __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
  45967. return __ret;
  45968. }
  45969. #else
  45970. __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
  45971. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  45972. int64x2_t __ret;
  45973. __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
  45974. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  45975. return __ret;
  45976. }
  45977. #endif
  45978. #ifdef __LITTLE_ENDIAN__
  45979. __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
  45980. int64x1_t __ret;
  45981. __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
  45982. return __ret;
  45983. }
  45984. #else
  45985. __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
  45986. int64x1_t __ret;
  45987. __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
  45988. return __ret;
  45989. }
  45990. #endif
  45991. #ifdef __LITTLE_ENDIAN__
  45992. __ai uint32_t vcvts_u32_f32(float32_t __p0) {
  45993. uint32_t __ret;
  45994. __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
  45995. return __ret;
  45996. }
  45997. #else
  45998. __ai uint32_t vcvts_u32_f32(float32_t __p0) {
  45999. uint32_t __ret;
  46000. __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
  46001. return __ret;
  46002. }
  46003. #endif
  46004. #ifdef __LITTLE_ENDIAN__
  46005. __ai uint64_t vcvtd_u64_f64(float64_t __p0) {
  46006. uint64_t __ret;
  46007. __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
  46008. return __ret;
  46009. }
  46010. #else
  46011. __ai uint64_t vcvtd_u64_f64(float64_t __p0) {
  46012. uint64_t __ret;
  46013. __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
  46014. return __ret;
  46015. }
  46016. #endif
  46017. #ifdef __LITTLE_ENDIAN__
  46018. __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
  46019. uint64x2_t __ret;
  46020. __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
  46021. return __ret;
  46022. }
  46023. #else
  46024. __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
  46025. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  46026. uint64x2_t __ret;
  46027. __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
  46028. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  46029. return __ret;
  46030. }
  46031. #endif
  46032. #ifdef __LITTLE_ENDIAN__
  46033. __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
  46034. uint64x1_t __ret;
  46035. __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
  46036. return __ret;
  46037. }
  46038. #else
  46039. __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
  46040. uint64x1_t __ret;
  46041. __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
  46042. return __ret;
  46043. }
  46044. #endif
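/* Illustrative usage sketch (editor annotation). The plain float-to-integer converts
 * round toward zero:
 *
 *   int64x2_t t = vcvtq_s64_f64(vdupq_n_f64(-1.7));   // {-1, -1}
 */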
  46045. #ifdef __LITTLE_ENDIAN__
  46046. __ai int32_t vcvtas_s32_f32(float32_t __p0) {
  46047. int32_t __ret;
  46048. __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
  46049. return __ret;
  46050. }
  46051. #else
  46052. __ai int32_t vcvtas_s32_f32(float32_t __p0) {
  46053. int32_t __ret;
  46054. __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
  46055. return __ret;
  46056. }
  46057. #endif
  46058. #ifdef __LITTLE_ENDIAN__
  46059. __ai int64_t vcvtad_s64_f64(float64_t __p0) {
  46060. int64_t __ret;
  46061. __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
  46062. return __ret;
  46063. }
  46064. #else
  46065. __ai int64_t vcvtad_s64_f64(float64_t __p0) {
  46066. int64_t __ret;
  46067. __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
  46068. return __ret;
  46069. }
  46070. #endif
  46071. #ifdef __LITTLE_ENDIAN__
  46072. __ai uint32_t vcvtas_u32_f32(float32_t __p0) {
  46073. uint32_t __ret;
  46074. __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
  46075. return __ret;
  46076. }
  46077. #else
  46078. __ai uint32_t vcvtas_u32_f32(float32_t __p0) {
  46079. uint32_t __ret;
  46080. __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
  46081. return __ret;
  46082. }
  46083. #endif
  46084. #ifdef __LITTLE_ENDIAN__
  46085. __ai uint64_t vcvtad_u64_f64(float64_t __p0) {
  46086. uint64_t __ret;
  46087. __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
  46088. return __ret;
  46089. }
  46090. #else
  46091. __ai uint64_t vcvtad_u64_f64(float64_t __p0) {
  46092. uint64_t __ret;
  46093. __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
  46094. return __ret;
  46095. }
  46096. #endif
  46097. #ifdef __LITTLE_ENDIAN__
  46098. __ai int32_t vcvtms_s32_f32(float32_t __p0) {
  46099. int32_t __ret;
  46100. __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
  46101. return __ret;
  46102. }
  46103. #else
  46104. __ai int32_t vcvtms_s32_f32(float32_t __p0) {
  46105. int32_t __ret;
  46106. __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
  46107. return __ret;
  46108. }
  46109. #endif
  46110. #ifdef __LITTLE_ENDIAN__
  46111. __ai int64_t vcvtmd_s64_f64(float64_t __p0) {
  46112. int64_t __ret;
  46113. __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
  46114. return __ret;
  46115. }
  46116. #else
  46117. __ai int64_t vcvtmd_s64_f64(float64_t __p0) {
  46118. int64_t __ret;
  46119. __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
  46120. return __ret;
  46121. }
  46122. #endif
  46123. #ifdef __LITTLE_ENDIAN__
  46124. __ai uint32_t vcvtms_u32_f32(float32_t __p0) {
  46125. uint32_t __ret;
  46126. __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
  46127. return __ret;
  46128. }
  46129. #else
  46130. __ai uint32_t vcvtms_u32_f32(float32_t __p0) {
  46131. uint32_t __ret;
  46132. __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
  46133. return __ret;
  46134. }
  46135. #endif
  46136. #ifdef __LITTLE_ENDIAN__
  46137. __ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
  46138. uint64_t __ret;
  46139. __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
  46140. return __ret;
  46141. }
  46142. #else
  46143. __ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
  46144. uint64_t __ret;
  46145. __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
  46146. return __ret;
  46147. }
  46148. #endif
  46149. #ifdef __LITTLE_ENDIAN__
  46150. __ai int32_t vcvtns_s32_f32(float32_t __p0) {
  46151. int32_t __ret;
  46152. __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
  46153. return __ret;
  46154. }
  46155. #else
  46156. __ai int32_t vcvtns_s32_f32(float32_t __p0) {
  46157. int32_t __ret;
  46158. __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
  46159. return __ret;
  46160. }
  46161. #endif
  46162. #ifdef __LITTLE_ENDIAN__
  46163. __ai int64_t vcvtnd_s64_f64(float64_t __p0) {
  46164. int64_t __ret;
  46165. __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
  46166. return __ret;
  46167. }
  46168. #else
  46169. __ai int64_t vcvtnd_s64_f64(float64_t __p0) {
  46170. int64_t __ret;
  46171. __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
  46172. return __ret;
  46173. }
  46174. #endif
  46175. #ifdef __LITTLE_ENDIAN__
  46176. __ai uint32_t vcvtns_u32_f32(float32_t __p0) {
  46177. uint32_t __ret;
  46178. __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
  46179. return __ret;
  46180. }
  46181. #else
  46182. __ai uint32_t vcvtns_u32_f32(float32_t __p0) {
  46183. uint32_t __ret;
  46184. __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
  46185. return __ret;
  46186. }
  46187. #endif
  46188. #ifdef __LITTLE_ENDIAN__
  46189. __ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
  46190. uint64_t __ret;
  46191. __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
  46192. return __ret;
  46193. }
  46194. #else
  46195. __ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
  46196. uint64_t __ret;
  46197. __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
  46198. return __ret;
  46199. }
  46200. #endif
  46201. #ifdef __LITTLE_ENDIAN__
  46202. __ai int32_t vcvtps_s32_f32(float32_t __p0) {
  46203. int32_t __ret;
  46204. __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
  46205. return __ret;
  46206. }
  46207. #else
  46208. __ai int32_t vcvtps_s32_f32(float32_t __p0) {
  46209. int32_t __ret;
  46210. __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
  46211. return __ret;
  46212. }
  46213. #endif
  46214. #ifdef __LITTLE_ENDIAN__
  46215. __ai int64_t vcvtpd_s64_f64(float64_t __p0) {
  46216. int64_t __ret;
  46217. __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
  46218. return __ret;
  46219. }
  46220. #else
  46221. __ai int64_t vcvtpd_s64_f64(float64_t __p0) {
  46222. int64_t __ret;
  46223. __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
  46224. return __ret;
  46225. }
  46226. #endif
  46227. #ifdef __LITTLE_ENDIAN__
  46228. __ai uint32_t vcvtps_u32_f32(float32_t __p0) {
  46229. uint32_t __ret;
  46230. __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
  46231. return __ret;
  46232. }
  46233. #else
  46234. __ai uint32_t vcvtps_u32_f32(float32_t __p0) {
  46235. uint32_t __ret;
  46236. __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
  46237. return __ret;
  46238. }
  46239. #endif
  46240. #ifdef __LITTLE_ENDIAN__
  46241. __ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
  46242. uint64_t __ret;
  46243. __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
  46244. return __ret;
  46245. }
  46246. #else
  46247. __ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
  46248. uint64_t __ret;
  46249. __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
  46250. return __ret;
  46251. }
  46252. #endif
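/* Illustrative usage sketch (editor annotation). The a/m/n/p suffixes select the
 * rounding mode of the float-to-integer convert:
 *
 *   int32_t a = vcvtas_s32_f32(2.5f);   // 3: to nearest, ties away from zero
 *   int32_t m = vcvtms_s32_f32(2.5f);   // 2: toward minus infinity
 *   int32_t n = vcvtns_s32_f32(2.5f);   // 2: to nearest, ties to even
 *   int32_t p = vcvtps_s32_f32(2.5f);   // 3: toward plus infinity
 */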
  46253. #ifdef __LITTLE_ENDIAN__
  46254. __ai float32_t vcvtxd_f32_f64(float64_t __p0) {
  46255. float32_t __ret;
  46256. __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
  46257. return __ret;
  46258. }
  46259. #else
  46260. __ai float32_t vcvtxd_f32_f64(float64_t __p0) {
  46261. float32_t __ret;
  46262. __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
  46263. return __ret;
  46264. }
  46265. #endif
  46266. #ifdef __LITTLE_ENDIAN__
  46267. __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
  46268. float32x2_t __ret;
  46269. __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
  46270. return __ret;
  46271. }
  46272. #else
  46273. __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
  46274. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  46275. float32x2_t __ret;
  46276. __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
  46277. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  46278. return __ret;
  46279. }
  46280. __ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
  46281. float32x2_t __ret;
  46282. __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
  46283. return __ret;
  46284. }
  46285. #endif
  46286. #ifdef __LITTLE_ENDIAN__
  46287. __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
  46288. float32x4_t __ret;
  46289. __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
  46290. return __ret;
  46291. }
  46292. #else
  46293. __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
  46294. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  46295. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  46296. float32x4_t __ret;
  46297. __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
  46298. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  46299. return __ret;
  46300. }
  46301. #endif
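/* Illustrative usage sketch (editor annotation). vcvtx* narrow f64 to f32 with
 * round-to-odd (FCVTXN), which avoids double rounding if the f32 result is
 * narrowed again later:
 *
 *   float32x2_t r = vcvtx_f32_f64(vdupq_n_f64(1.0));   // {1.0f, 1.0f}
 */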
  46302. #ifdef __LITTLE_ENDIAN__
  46303. __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
  46304. float64x2_t __ret;
  46305. __ret = __p0 / __p1;
  46306. return __ret;
  46307. }
  46308. #else
  46309. __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
  46310. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  46311. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  46312. float64x2_t __ret;
  46313. __ret = __rev0 / __rev1;
  46314. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  46315. return __ret;
  46316. }
  46317. #endif
  46318. #ifdef __LITTLE_ENDIAN__
  46319. __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
  46320. float32x4_t __ret;
  46321. __ret = __p0 / __p1;
  46322. return __ret;
  46323. }
  46324. #else
  46325. __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
  46326. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  46327. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  46328. float32x4_t __ret;
  46329. __ret = __rev0 / __rev1;
  46330. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  46331. return __ret;
  46332. }
  46333. #endif
  46334. #ifdef __LITTLE_ENDIAN__
  46335. __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
  46336. float64x1_t __ret;
  46337. __ret = __p0 / __p1;
  46338. return __ret;
  46339. }
  46340. #else
  46341. __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
  46342. float64x1_t __ret;
  46343. __ret = __p0 / __p1;
  46344. return __ret;
  46345. }
  46346. #endif
  46347. #ifdef __LITTLE_ENDIAN__
  46348. __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
  46349. float32x2_t __ret;
  46350. __ret = __p0 / __p1;
  46351. return __ret;
  46352. }
  46353. #else
  46354. __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
  46355. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  46356. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  46357. float32x2_t __ret;
  46358. __ret = __rev0 / __rev1;
  46359. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  46360. return __ret;
  46361. }
  46362. #endif
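/* Illustrative usage sketch (editor annotation). vdiv/vdivq perform lane-wise
 * floating-point division:
 *
 *   float32x4_t q = vdivq_f32(vdupq_n_f32(6.0f), vdupq_n_f32(3.0f));   // all lanes 2.0f
 */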
  46363. #ifdef __LITTLE_ENDIAN__
  46364. #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
  46365. poly8x8_t __s0 = __p0; \
  46366. poly8_t __ret; \
  46367. __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
  46368. __ret; \
  46369. })
  46370. #else
  46371. #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
  46372. poly8x8_t __s0 = __p0; \
  46373. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  46374. poly8_t __ret; \
  46375. __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
  46376. __ret; \
  46377. })
  46378. #endif
  46379. #ifdef __LITTLE_ENDIAN__
  46380. #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
  46381. poly16x4_t __s0 = __p0; \
  46382. poly16_t __ret; \
  46383. __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
  46384. __ret; \
  46385. })
  46386. #else
  46387. #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
  46388. poly16x4_t __s0 = __p0; \
  46389. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46390. poly16_t __ret; \
  46391. __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
  46392. __ret; \
  46393. })
  46394. #endif
  46395. #ifdef __LITTLE_ENDIAN__
  46396. #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
  46397. uint8x8_t __s0 = __p0; \
  46398. uint8_t __ret; \
  46399. __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
  46400. __ret; \
  46401. })
  46402. #else
  46403. #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
  46404. uint8x8_t __s0 = __p0; \
  46405. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  46406. uint8_t __ret; \
  46407. __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
  46408. __ret; \
  46409. })
  46410. #endif
  46411. #ifdef __LITTLE_ENDIAN__
  46412. #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
  46413. uint32x2_t __s0 = __p0; \
  46414. uint32_t __ret; \
  46415. __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
  46416. __ret; \
  46417. })
  46418. #else
  46419. #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
  46420. uint32x2_t __s0 = __p0; \
  46421. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46422. uint32_t __ret; \
  46423. __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
  46424. __ret; \
  46425. })
  46426. #endif
  46427. #ifdef __LITTLE_ENDIAN__
  46428. #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
  46429. uint64x1_t __s0 = __p0; \
  46430. uint64_t __ret; \
  46431. __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
  46432. __ret; \
  46433. })
  46434. #else
  46435. #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
  46436. uint64x1_t __s0 = __p0; \
  46437. uint64_t __ret; \
  46438. __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
  46439. __ret; \
  46440. })
  46441. #endif
  46442. #ifdef __LITTLE_ENDIAN__
  46443. #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
  46444. uint16x4_t __s0 = __p0; \
  46445. uint16_t __ret; \
  46446. __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
  46447. __ret; \
  46448. })
  46449. #else
  46450. #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
  46451. uint16x4_t __s0 = __p0; \
  46452. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46453. uint16_t __ret; \
  46454. __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
  46455. __ret; \
  46456. })
  46457. #endif
  46458. #ifdef __LITTLE_ENDIAN__
  46459. #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
  46460. int8x8_t __s0 = __p0; \
  46461. int8_t __ret; \
  46462. __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
  46463. __ret; \
  46464. })
  46465. #else
  46466. #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
  46467. int8x8_t __s0 = __p0; \
  46468. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  46469. int8_t __ret; \
  46470. __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
  46471. __ret; \
  46472. })
  46473. #endif
  46474. #ifdef __LITTLE_ENDIAN__
  46475. #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
  46476. float64x1_t __s0 = __p0; \
  46477. float64_t __ret; \
  46478. __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
  46479. __ret; \
  46480. })
  46481. #else
  46482. #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
  46483. float64x1_t __s0 = __p0; \
  46484. float64_t __ret; \
  46485. __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
  46486. __ret; \
  46487. })
  46488. #endif
  46489. #ifdef __LITTLE_ENDIAN__
  46490. #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
  46491. float32x2_t __s0 = __p0; \
  46492. float32_t __ret; \
  46493. __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \
  46494. __ret; \
  46495. })
  46496. #else
  46497. #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
  46498. float32x2_t __s0 = __p0; \
  46499. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46500. float32_t __ret; \
  46501. __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \
  46502. __ret; \
  46503. })
  46504. #endif
  46505. #ifdef __LITTLE_ENDIAN__
  46506. #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
  46507. int32x2_t __s0 = __p0; \
  46508. int32_t __ret; \
  46509. __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
  46510. __ret; \
  46511. })
  46512. #else
  46513. #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
  46514. int32x2_t __s0 = __p0; \
  46515. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46516. int32_t __ret; \
  46517. __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
  46518. __ret; \
  46519. })
  46520. #endif
  46521. #ifdef __LITTLE_ENDIAN__
  46522. #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
  46523. int64x1_t __s0 = __p0; \
  46524. int64_t __ret; \
  46525. __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
  46526. __ret; \
  46527. })
  46528. #else
  46529. #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
  46530. int64x1_t __s0 = __p0; \
  46531. int64_t __ret; \
  46532. __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
  46533. __ret; \
  46534. })
  46535. #endif
  46536. #ifdef __LITTLE_ENDIAN__
  46537. #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
  46538. int16x4_t __s0 = __p0; \
  46539. int16_t __ret; \
  46540. __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
  46541. __ret; \
  46542. })
  46543. #else
  46544. #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
  46545. int16x4_t __s0 = __p0; \
  46546. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46547. int16_t __ret; \
  46548. __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
  46549. __ret; \
  46550. })
  46551. #endif
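/* Illustrative usage sketch (editor annotation). The scalar vdup*_lane_* forms read
 * one lane of a 64-bit vector into a scalar:
 *
 *   int16x4_t v = vset_lane_s16(42, vdup_n_s16(7), 2);   // v = {7, 7, 42, 7}
 *   int16_t   x = vduph_lane_s16(v, 2);                  // 42
 */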
  46552. #ifdef __LITTLE_ENDIAN__
  46553. #define vdup_lane_p64(__p0, __p1) __extension__ ({ \
  46554. poly64x1_t __s0 = __p0; \
  46555. poly64x1_t __ret; \
  46556. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  46557. __ret; \
  46558. })
  46559. #else
  46560. #define vdup_lane_p64(__p0, __p1) __extension__ ({ \
  46561. poly64x1_t __s0 = __p0; \
  46562. poly64x1_t __ret; \
  46563. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  46564. __ret; \
  46565. })
  46566. #endif
  46567. #ifdef __LITTLE_ENDIAN__
  46568. #define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
  46569. poly64x1_t __s0 = __p0; \
  46570. poly64x2_t __ret; \
  46571. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  46572. __ret; \
  46573. })
  46574. #else
  46575. #define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
  46576. poly64x1_t __s0 = __p0; \
  46577. poly64x2_t __ret; \
  46578. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  46579. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  46580. __ret; \
  46581. })
  46582. #endif
  46583. #ifdef __LITTLE_ENDIAN__
  46584. #define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
  46585. float64x1_t __s0 = __p0; \
  46586. float64x2_t __ret; \
  46587. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  46588. __ret; \
  46589. })
  46590. #else
  46591. #define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
  46592. float64x1_t __s0 = __p0; \
  46593. float64x2_t __ret; \
  46594. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  46595. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  46596. __ret; \
  46597. })
  46598. #endif
  46599. #ifdef __LITTLE_ENDIAN__
  46600. #define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
  46601. float16x4_t __s0 = __p0; \
  46602. float16x8_t __ret; \
  46603. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46604. __ret; \
  46605. })
  46606. #else
  46607. #define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
  46608. float16x4_t __s0 = __p0; \
  46609. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46610. float16x8_t __ret; \
  46611. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46612. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  46613. __ret; \
  46614. })
  46615. #endif
  46616. #ifdef __LITTLE_ENDIAN__
  46617. #define vdup_lane_f64(__p0, __p1) __extension__ ({ \
  46618. float64x1_t __s0 = __p0; \
  46619. float64x1_t __ret; \
  46620. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  46621. __ret; \
  46622. })
  46623. #else
  46624. #define vdup_lane_f64(__p0, __p1) __extension__ ({ \
  46625. float64x1_t __s0 = __p0; \
  46626. float64x1_t __ret; \
  46627. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  46628. __ret; \
  46629. })
  46630. #endif
  46631. #ifdef __LITTLE_ENDIAN__
  46632. #define vdup_lane_f16(__p0, __p1) __extension__ ({ \
  46633. float16x4_t __s0 = __p0; \
  46634. float16x4_t __ret; \
  46635. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  46636. __ret; \
  46637. })
  46638. #else
  46639. #define vdup_lane_f16(__p0, __p1) __extension__ ({ \
  46640. float16x4_t __s0 = __p0; \
  46641. float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46642. float16x4_t __ret; \
  46643. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  46644. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  46645. __ret; \
  46646. })
  46647. #endif
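/* The vdup_lane_* / vdupq_lane_* forms above broadcast lane __p1 of a 64-bit source
 * vector into every lane of the result; the big-endian ("#else") bodies differ only by
 * lane reversals (none are needed for single-element vectors), so the architectural
 * lane index means the same thing on either endianness. Illustrative sketch, not part
 * of the header (values are made up):
 *
 *   float64x1_t d = vdup_n_f64(3.0);        // vdup_n_f64 is defined later in this section
 *   float64x2_t q = vdupq_lane_f64(d, 0);   // q = {3.0, 3.0}
 */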
  46648. #ifdef __LITTLE_ENDIAN__
  46649. #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
  46650. poly8x16_t __s0 = __p0; \
  46651. poly8_t __ret; \
  46652. __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
  46653. __ret; \
  46654. })
  46655. #else
  46656. #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
  46657. poly8x16_t __s0 = __p0; \
  46658. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  46659. poly8_t __ret; \
  46660. __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
  46661. __ret; \
  46662. })
  46663. #endif
  46664. #ifdef __LITTLE_ENDIAN__
  46665. #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
  46666. poly16x8_t __s0 = __p0; \
  46667. poly16_t __ret; \
  46668. __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
  46669. __ret; \
  46670. })
  46671. #else
  46672. #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
  46673. poly16x8_t __s0 = __p0; \
  46674. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  46675. poly16_t __ret; \
  46676. __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
  46677. __ret; \
  46678. })
  46679. #endif
  46680. #ifdef __LITTLE_ENDIAN__
  46681. #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
  46682. uint8x16_t __s0 = __p0; \
  46683. uint8_t __ret; \
  46684. __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
  46685. __ret; \
  46686. })
  46687. #else
  46688. #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
  46689. uint8x16_t __s0 = __p0; \
  46690. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  46691. uint8_t __ret; \
  46692. __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
  46693. __ret; \
  46694. })
  46695. #endif
  46696. #ifdef __LITTLE_ENDIAN__
  46697. #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
  46698. uint32x4_t __s0 = __p0; \
  46699. uint32_t __ret; \
  46700. __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
  46701. __ret; \
  46702. })
  46703. #else
  46704. #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
  46705. uint32x4_t __s0 = __p0; \
  46706. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46707. uint32_t __ret; \
  46708. __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
  46709. __ret; \
  46710. })
  46711. #endif
  46712. #ifdef __LITTLE_ENDIAN__
  46713. #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
  46714. uint64x2_t __s0 = __p0; \
  46715. uint64_t __ret; \
  46716. __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
  46717. __ret; \
  46718. })
  46719. #else
  46720. #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
  46721. uint64x2_t __s0 = __p0; \
  46722. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46723. uint64_t __ret; \
  46724. __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
  46725. __ret; \
  46726. })
  46727. #endif
  46728. #ifdef __LITTLE_ENDIAN__
  46729. #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
  46730. uint16x8_t __s0 = __p0; \
  46731. uint16_t __ret; \
  46732. __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
  46733. __ret; \
  46734. })
  46735. #else
  46736. #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
  46737. uint16x8_t __s0 = __p0; \
  46738. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  46739. uint16_t __ret; \
  46740. __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
  46741. __ret; \
  46742. })
  46743. #endif
  46744. #ifdef __LITTLE_ENDIAN__
  46745. #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
  46746. int8x16_t __s0 = __p0; \
  46747. int8_t __ret; \
  46748. __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
  46749. __ret; \
  46750. })
  46751. #else
  46752. #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
  46753. int8x16_t __s0 = __p0; \
  46754. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  46755. int8_t __ret; \
  46756. __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
  46757. __ret; \
  46758. })
  46759. #endif
  46760. #ifdef __LITTLE_ENDIAN__
  46761. #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
  46762. float64x2_t __s0 = __p0; \
  46763. float64_t __ret; \
  46764. __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \
  46765. __ret; \
  46766. })
  46767. #else
  46768. #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
  46769. float64x2_t __s0 = __p0; \
  46770. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46771. float64_t __ret; \
  46772. __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \
  46773. __ret; \
  46774. })
  46775. #endif
  46776. #ifdef __LITTLE_ENDIAN__
  46777. #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
  46778. float32x4_t __s0 = __p0; \
  46779. float32_t __ret; \
  46780. __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \
  46781. __ret; \
  46782. })
  46783. #else
  46784. #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
  46785. float32x4_t __s0 = __p0; \
  46786. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46787. float32_t __ret; \
  46788. __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \
  46789. __ret; \
  46790. })
  46791. #endif
  46792. #ifdef __LITTLE_ENDIAN__
  46793. #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
  46794. int32x4_t __s0 = __p0; \
  46795. int32_t __ret; \
  46796. __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
  46797. __ret; \
  46798. })
  46799. #else
  46800. #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
  46801. int32x4_t __s0 = __p0; \
  46802. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46803. int32_t __ret; \
  46804. __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
  46805. __ret; \
  46806. })
  46807. #endif
  46808. #ifdef __LITTLE_ENDIAN__
  46809. #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
  46810. int64x2_t __s0 = __p0; \
  46811. int64_t __ret; \
  46812. __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
  46813. __ret; \
  46814. })
  46815. #else
  46816. #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
  46817. int64x2_t __s0 = __p0; \
  46818. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46819. int64_t __ret; \
  46820. __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
  46821. __ret; \
  46822. })
  46823. #endif
  46824. #ifdef __LITTLE_ENDIAN__
  46825. #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
  46826. int16x8_t __s0 = __p0; \
  46827. int16_t __ret; \
  46828. __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
  46829. __ret; \
  46830. })
  46831. #else
  46832. #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
  46833. int16x8_t __s0 = __p0; \
  46834. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  46835. int16_t __ret; \
  46836. __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
  46837. __ret; \
  46838. })
  46839. #endif
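/* vdupb_/vduph_/vdups_/vdupd_laneq_* return a single scalar taken from lane __p1 of a
 * 128-bit vector (b/h/s/d = 8/16/32/64-bit element); on big-endian targets the input is
 * lane-reversed first so __p1 still names the architectural lane. Illustrative sketch,
 * not part of the header:
 *
 *   // given some uint8x16_t v:
 *   //   uint8_t last = vdupb_laneq_u8(v, 15);   // scalar copy of element 15
 */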
  46840. #ifdef __LITTLE_ENDIAN__
  46841. #define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
  46842. poly8x16_t __s0 = __p0; \
  46843. poly8x8_t __ret; \
  46844. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46845. __ret; \
  46846. })
  46847. #else
  46848. #define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
  46849. poly8x16_t __s0 = __p0; \
  46850. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  46851. poly8x8_t __ret; \
  46852. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46853. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  46854. __ret; \
  46855. })
  46856. #endif
  46857. #ifdef __LITTLE_ENDIAN__
  46858. #define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
  46859. poly64x2_t __s0 = __p0; \
  46860. poly64x1_t __ret; \
  46861. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  46862. __ret; \
  46863. })
  46864. #else
  46865. #define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
  46866. poly64x2_t __s0 = __p0; \
  46867. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46868. poly64x1_t __ret; \
  46869. __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
  46870. __ret; \
  46871. })
  46872. #endif
  46873. #ifdef __LITTLE_ENDIAN__
  46874. #define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
  46875. poly16x8_t __s0 = __p0; \
  46876. poly16x4_t __ret; \
  46877. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  46878. __ret; \
  46879. })
  46880. #else
  46881. #define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
  46882. poly16x8_t __s0 = __p0; \
  46883. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  46884. poly16x4_t __ret; \
  46885. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  46886. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  46887. __ret; \
  46888. })
  46889. #endif
  46890. #ifdef __LITTLE_ENDIAN__
  46891. #define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
  46892. poly8x16_t __s0 = __p0; \
  46893. poly8x16_t __ret; \
  46894. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46895. __ret; \
  46896. })
  46897. #else
  46898. #define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
  46899. poly8x16_t __s0 = __p0; \
  46900. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  46901. poly8x16_t __ret; \
  46902. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46903. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  46904. __ret; \
  46905. })
  46906. #endif
  46907. #ifdef __LITTLE_ENDIAN__
  46908. #define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
  46909. poly64x2_t __s0 = __p0; \
  46910. poly64x2_t __ret; \
  46911. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  46912. __ret; \
  46913. })
  46914. #else
  46915. #define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
  46916. poly64x2_t __s0 = __p0; \
  46917. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46918. poly64x2_t __ret; \
  46919. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  46920. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  46921. __ret; \
  46922. })
  46923. #endif
  46924. #ifdef __LITTLE_ENDIAN__
  46925. #define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
  46926. poly16x8_t __s0 = __p0; \
  46927. poly16x8_t __ret; \
  46928. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46929. __ret; \
  46930. })
  46931. #else
  46932. #define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
  46933. poly16x8_t __s0 = __p0; \
  46934. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  46935. poly16x8_t __ret; \
  46936. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46937. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  46938. __ret; \
  46939. })
  46940. #endif
  46941. #ifdef __LITTLE_ENDIAN__
  46942. #define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
  46943. uint8x16_t __s0 = __p0; \
  46944. uint8x16_t __ret; \
  46945. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46946. __ret; \
  46947. })
  46948. #else
  46949. #define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
  46950. uint8x16_t __s0 = __p0; \
  46951. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  46952. uint8x16_t __ret; \
  46953. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46954. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  46955. __ret; \
  46956. })
  46957. #endif
  46958. #ifdef __LITTLE_ENDIAN__
  46959. #define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
  46960. uint32x4_t __s0 = __p0; \
  46961. uint32x4_t __ret; \
  46962. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  46963. __ret; \
  46964. })
  46965. #else
  46966. #define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
  46967. uint32x4_t __s0 = __p0; \
  46968. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  46969. uint32x4_t __ret; \
  46970. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  46971. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  46972. __ret; \
  46973. })
  46974. #endif
  46975. #ifdef __LITTLE_ENDIAN__
  46976. #define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
  46977. uint64x2_t __s0 = __p0; \
  46978. uint64x2_t __ret; \
  46979. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  46980. __ret; \
  46981. })
  46982. #else
  46983. #define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
  46984. uint64x2_t __s0 = __p0; \
  46985. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  46986. uint64x2_t __ret; \
  46987. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  46988. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  46989. __ret; \
  46990. })
  46991. #endif
  46992. #ifdef __LITTLE_ENDIAN__
  46993. #define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
  46994. uint16x8_t __s0 = __p0; \
  46995. uint16x8_t __ret; \
  46996. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  46997. __ret; \
  46998. })
  46999. #else
  47000. #define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
  47001. uint16x8_t __s0 = __p0; \
  47002. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  47003. uint16x8_t __ret; \
  47004. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47005. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  47006. __ret; \
  47007. })
  47008. #endif
  47009. #ifdef __LITTLE_ENDIAN__
  47010. #define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
  47011. int8x16_t __s0 = __p0; \
  47012. int8x16_t __ret; \
  47013. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47014. __ret; \
  47015. })
  47016. #else
  47017. #define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
  47018. int8x16_t __s0 = __p0; \
  47019. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  47020. int8x16_t __ret; \
  47021. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47022. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  47023. __ret; \
  47024. })
  47025. #endif
  47026. #ifdef __LITTLE_ENDIAN__
  47027. #define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
  47028. float64x2_t __s0 = __p0; \
  47029. float64x2_t __ret; \
  47030. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  47031. __ret; \
  47032. })
  47033. #else
  47034. #define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
  47035. float64x2_t __s0 = __p0; \
  47036. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47037. float64x2_t __ret; \
  47038. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  47039. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47040. __ret; \
  47041. })
  47042. #endif
  47043. #ifdef __LITTLE_ENDIAN__
  47044. #define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
  47045. float32x4_t __s0 = __p0; \
  47046. float32x4_t __ret; \
  47047. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  47048. __ret; \
  47049. })
  47050. #else
  47051. #define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
  47052. float32x4_t __s0 = __p0; \
  47053. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  47054. float32x4_t __ret; \
  47055. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  47056. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  47057. __ret; \
  47058. })
  47059. #endif
  47060. #ifdef __LITTLE_ENDIAN__
  47061. #define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
  47062. float16x8_t __s0 = __p0; \
  47063. float16x8_t __ret; \
  47064. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47065. __ret; \
  47066. })
  47067. #else
  47068. #define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
  47069. float16x8_t __s0 = __p0; \
  47070. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  47071. float16x8_t __ret; \
  47072. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47073. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  47074. __ret; \
  47075. })
  47076. #endif
  47077. #ifdef __LITTLE_ENDIAN__
  47078. #define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
  47079. int32x4_t __s0 = __p0; \
  47080. int32x4_t __ret; \
  47081. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  47082. __ret; \
  47083. })
  47084. #else
  47085. #define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
  47086. int32x4_t __s0 = __p0; \
  47087. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  47088. int32x4_t __ret; \
  47089. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  47090. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  47091. __ret; \
  47092. })
  47093. #endif
  47094. #ifdef __LITTLE_ENDIAN__
  47095. #define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
  47096. int64x2_t __s0 = __p0; \
  47097. int64x2_t __ret; \
  47098. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  47099. __ret; \
  47100. })
  47101. #else
  47102. #define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
  47103. int64x2_t __s0 = __p0; \
  47104. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47105. int64x2_t __ret; \
  47106. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  47107. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47108. __ret; \
  47109. })
  47110. #endif
  47111. #ifdef __LITTLE_ENDIAN__
  47112. #define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
  47113. int16x8_t __s0 = __p0; \
  47114. int16x8_t __ret; \
  47115. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47116. __ret; \
  47117. })
  47118. #else
  47119. #define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
  47120. int16x8_t __s0 = __p0; \
  47121. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  47122. int16x8_t __ret; \
  47123. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47124. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  47125. __ret; \
  47126. })
  47127. #endif
  47128. #ifdef __LITTLE_ENDIAN__
  47129. #define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
  47130. uint8x16_t __s0 = __p0; \
  47131. uint8x8_t __ret; \
  47132. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47133. __ret; \
  47134. })
  47135. #else
  47136. #define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
  47137. uint8x16_t __s0 = __p0; \
  47138. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  47139. uint8x8_t __ret; \
  47140. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47141. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  47142. __ret; \
  47143. })
  47144. #endif
  47145. #ifdef __LITTLE_ENDIAN__
  47146. #define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
  47147. uint32x4_t __s0 = __p0; \
  47148. uint32x2_t __ret; \
  47149. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  47150. __ret; \
  47151. })
  47152. #else
  47153. #define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
  47154. uint32x4_t __s0 = __p0; \
  47155. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  47156. uint32x2_t __ret; \
  47157. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  47158. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47159. __ret; \
  47160. })
  47161. #endif
  47162. #ifdef __LITTLE_ENDIAN__
  47163. #define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
  47164. uint64x2_t __s0 = __p0; \
  47165. uint64x1_t __ret; \
  47166. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  47167. __ret; \
  47168. })
  47169. #else
  47170. #define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
  47171. uint64x2_t __s0 = __p0; \
  47172. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47173. uint64x1_t __ret; \
  47174. __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
  47175. __ret; \
  47176. })
  47177. #endif
  47178. #ifdef __LITTLE_ENDIAN__
  47179. #define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
  47180. uint16x8_t __s0 = __p0; \
  47181. uint16x4_t __ret; \
  47182. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  47183. __ret; \
  47184. })
  47185. #else
  47186. #define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
  47187. uint16x8_t __s0 = __p0; \
  47188. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  47189. uint16x4_t __ret; \
  47190. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  47191. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  47192. __ret; \
  47193. })
  47194. #endif
  47195. #ifdef __LITTLE_ENDIAN__
  47196. #define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
  47197. int8x16_t __s0 = __p0; \
  47198. int8x8_t __ret; \
  47199. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47200. __ret; \
  47201. })
  47202. #else
  47203. #define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
  47204. int8x16_t __s0 = __p0; \
  47205. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  47206. int8x8_t __ret; \
  47207. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
  47208. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  47209. __ret; \
  47210. })
  47211. #endif
  47212. #ifdef __LITTLE_ENDIAN__
  47213. #define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
  47214. float64x2_t __s0 = __p0; \
  47215. float64x1_t __ret; \
  47216. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  47217. __ret; \
  47218. })
  47219. #else
  47220. #define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
  47221. float64x2_t __s0 = __p0; \
  47222. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47223. float64x1_t __ret; \
  47224. __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
  47225. __ret; \
  47226. })
  47227. #endif
  47228. #ifdef __LITTLE_ENDIAN__
  47229. #define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
  47230. float32x4_t __s0 = __p0; \
  47231. float32x2_t __ret; \
  47232. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  47233. __ret; \
  47234. })
  47235. #else
  47236. #define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
  47237. float32x4_t __s0 = __p0; \
  47238. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  47239. float32x2_t __ret; \
  47240. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  47241. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47242. __ret; \
  47243. })
  47244. #endif
  47245. #ifdef __LITTLE_ENDIAN__
  47246. #define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
  47247. float16x8_t __s0 = __p0; \
  47248. float16x4_t __ret; \
  47249. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  47250. __ret; \
  47251. })
  47252. #else
  47253. #define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
  47254. float16x8_t __s0 = __p0; \
  47255. float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  47256. float16x4_t __ret; \
  47257. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  47258. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  47259. __ret; \
  47260. })
  47261. #endif
  47262. #ifdef __LITTLE_ENDIAN__
  47263. #define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
  47264. int32x4_t __s0 = __p0; \
  47265. int32x2_t __ret; \
  47266. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
  47267. __ret; \
  47268. })
  47269. #else
  47270. #define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
  47271. int32x4_t __s0 = __p0; \
  47272. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  47273. int32x2_t __ret; \
  47274. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
  47275. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47276. __ret; \
  47277. })
  47278. #endif
  47279. #ifdef __LITTLE_ENDIAN__
  47280. #define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
  47281. int64x2_t __s0 = __p0; \
  47282. int64x1_t __ret; \
  47283. __ret = __builtin_shufflevector(__s0, __s0, __p1); \
  47284. __ret; \
  47285. })
  47286. #else
  47287. #define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
  47288. int64x2_t __s0 = __p0; \
  47289. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47290. int64x1_t __ret; \
  47291. __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
  47292. __ret; \
  47293. })
  47294. #endif
  47295. #ifdef __LITTLE_ENDIAN__
  47296. #define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
  47297. int16x8_t __s0 = __p0; \
  47298. int16x4_t __ret; \
  47299. __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
  47300. __ret; \
  47301. })
  47302. #else
  47303. #define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
  47304. int16x8_t __s0 = __p0; \
  47305. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  47306. int16x4_t __ret; \
  47307. __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
  47308. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  47309. __ret; \
  47310. })
  47311. #endif
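/* vdup_laneq_* (64-bit result) and vdupq_laneq_* (128-bit result) broadcast lane __p1 of
 * a 128-bit source into every lane of the destination using plain __builtin_shufflevector.
 * Illustrative sketch, not part of the header:
 *
 *   float32x4_t q = (float32x4_t) {1.0f, 2.0f, 3.0f, 4.0f};
 *   float32x2_t d = vdup_laneq_f32(q, 3);    // d = {4.0f, 4.0f}
 *   float32x4_t r = vdupq_laneq_f32(q, 0);   // r = {1.0f, 1.0f, 1.0f, 1.0f}
 */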
  47312. #ifdef __LITTLE_ENDIAN__
  47313. __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
  47314. poly64x1_t __ret;
  47315. __ret = (poly64x1_t) {__p0};
  47316. return __ret;
  47317. }
  47318. #else
  47319. __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
  47320. poly64x1_t __ret;
  47321. __ret = (poly64x1_t) {__p0};
  47322. return __ret;
  47323. }
  47324. #endif
  47325. #ifdef __LITTLE_ENDIAN__
  47326. __ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
  47327. poly64x2_t __ret;
  47328. __ret = (poly64x2_t) {__p0, __p0};
  47329. return __ret;
  47330. }
  47331. #else
  47332. __ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
  47333. poly64x2_t __ret;
  47334. __ret = (poly64x2_t) {__p0, __p0};
  47335. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  47336. return __ret;
  47337. }
  47338. #endif
  47339. #ifdef __LITTLE_ENDIAN__
  47340. __ai float64x2_t vdupq_n_f64(float64_t __p0) {
  47341. float64x2_t __ret;
  47342. __ret = (float64x2_t) {__p0, __p0};
  47343. return __ret;
  47344. }
  47345. #else
  47346. __ai float64x2_t vdupq_n_f64(float64_t __p0) {
  47347. float64x2_t __ret;
  47348. __ret = (float64x2_t) {__p0, __p0};
  47349. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  47350. return __ret;
  47351. }
  47352. #endif
  47353. #ifdef __LITTLE_ENDIAN__
  47354. __ai float64x1_t vdup_n_f64(float64_t __p0) {
  47355. float64x1_t __ret;
  47356. __ret = (float64x1_t) {__p0};
  47357. return __ret;
  47358. }
  47359. #else
  47360. __ai float64x1_t vdup_n_f64(float64_t __p0) {
  47361. float64x1_t __ret;
  47362. __ret = (float64x1_t) {__p0};
  47363. return __ret;
  47364. }
  47365. #endif
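/* vdup_n_* / vdupq_n_* splat one scalar into every lane via a vector initializer; for the
 * two-lane q forms the big-endian path adds a final element swap so the value lands in the
 * same memory lanes on either endianness. Illustrative sketch, not part of the header:
 *
 *   float64x2_t ones = vdupq_n_f64(1.0);   // {1.0, 1.0}
 */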
  47366. #ifdef __LITTLE_ENDIAN__
  47367. #define vext_p64(__p0, __p1, __p2) __extension__ ({ \
  47368. poly64x1_t __s0 = __p0; \
  47369. poly64x1_t __s1 = __p1; \
  47370. poly64x1_t __ret; \
  47371. __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
  47372. __ret; \
  47373. })
  47374. #else
  47375. #define vext_p64(__p0, __p1, __p2) __extension__ ({ \
  47376. poly64x1_t __s0 = __p0; \
  47377. poly64x1_t __s1 = __p1; \
  47378. poly64x1_t __ret; \
  47379. __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
  47380. __ret; \
  47381. })
  47382. #endif
  47383. #ifdef __LITTLE_ENDIAN__
  47384. #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
  47385. poly64x2_t __s0 = __p0; \
  47386. poly64x2_t __s1 = __p1; \
  47387. poly64x2_t __ret; \
  47388. __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
  47389. __ret; \
  47390. })
  47391. #else
  47392. #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
  47393. poly64x2_t __s0 = __p0; \
  47394. poly64x2_t __s1 = __p1; \
  47395. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47396. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  47397. poly64x2_t __ret; \
  47398. __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
  47399. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47400. __ret; \
  47401. })
  47402. #endif
  47403. #ifdef __LITTLE_ENDIAN__
  47404. #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
  47405. float64x2_t __s0 = __p0; \
  47406. float64x2_t __s1 = __p1; \
  47407. float64x2_t __ret; \
  47408. __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
  47409. __ret; \
  47410. })
  47411. #else
  47412. #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
  47413. float64x2_t __s0 = __p0; \
  47414. float64x2_t __s1 = __p1; \
  47415. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47416. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  47417. float64x2_t __ret; \
  47418. __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
  47419. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47420. __ret; \
  47421. })
  47422. #endif
  47423. #ifdef __LITTLE_ENDIAN__
  47424. #define vext_f64(__p0, __p1, __p2) __extension__ ({ \
  47425. float64x1_t __s0 = __p0; \
  47426. float64x1_t __s1 = __p1; \
  47427. float64x1_t __ret; \
  47428. __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
  47429. __ret; \
  47430. })
  47431. #else
  47432. #define vext_f64(__p0, __p1, __p2) __extension__ ({ \
  47433. float64x1_t __s0 = __p0; \
  47434. float64x1_t __s1 = __p1; \
  47435. float64x1_t __ret; \
  47436. __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
  47437. __ret; \
  47438. })
  47439. #endif
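/* vext_* and vextq_* extract a full-width vector from the concatenation of the two
 * operands, starting __p2 elements into the first one: element i of the result is
 * (__p0:__p1)[i + __p2]. Illustrative sketch, not part of the header:
 *
 *   float64x2_t a = (float64x2_t) {1.0, 2.0};
 *   float64x2_t b = (float64x2_t) {3.0, 4.0};
 *   float64x2_t r = vextq_f64(a, b, 1);   // r = {2.0, 3.0}
 */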
  47440. #ifdef __LITTLE_ENDIAN__
  47441. __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  47442. float64x2_t __ret;
  47443. __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
  47444. return __ret;
  47445. }
  47446. #else
  47447. __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  47448. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  47449. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  47450. float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  47451. float64x2_t __ret;
  47452. __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
  47453. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  47454. return __ret;
  47455. }
  47456. __ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  47457. float64x2_t __ret;
  47458. __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
  47459. return __ret;
  47460. }
  47461. #endif
  47462. #ifdef __LITTLE_ENDIAN__
  47463. __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  47464. float64x1_t __ret;
  47465. __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
  47466. return __ret;
  47467. }
  47468. #else
  47469. __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  47470. float64x1_t __ret;
  47471. __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
  47472. return __ret;
  47473. }
  47474. __ai float64x1_t __noswap_vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  47475. float64x1_t __ret;
  47476. __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
  47477. return __ret;
  47478. }
  47479. #endif
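/* vfmaq_f64 / vfma_f64 compute a fused multiply-add with a single rounding: each lane of
 * the result is __p0 + __p1 * __p2 (the accumulator comes first). The __noswap_* helpers
 * are internal entry points used by big-endian callers whose operands are already
 * lane-reversed. Illustrative sketch, not part of the header:
 *
 *   float64x2_t acc = (float64x2_t) {1.0, 1.0};
 *   float64x2_t x   = (float64x2_t) {2.0, 3.0};
 *   float64x2_t y   = (float64x2_t) {4.0, 5.0};
 *   float64x2_t r   = vfmaq_f64(acc, x, y);   // r = {9.0, 16.0}
 */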
  47480. #ifdef __LITTLE_ENDIAN__
  47481. #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47482. float64_t __s0 = __p0; \
  47483. float64_t __s1 = __p1; \
  47484. float64x1_t __s2 = __p2; \
  47485. float64_t __ret; \
  47486. __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
  47487. __ret; \
  47488. })
  47489. #else
  47490. #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47491. float64_t __s0 = __p0; \
  47492. float64_t __s1 = __p1; \
  47493. float64x1_t __s2 = __p2; \
  47494. float64_t __ret; \
  47495. __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
  47496. __ret; \
  47497. })
  47498. #define __noswap_vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47499. float64_t __s0 = __p0; \
  47500. float64_t __s1 = __p1; \
  47501. float64x1_t __s2 = __p2; \
  47502. float64_t __ret; \
  47503. __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
  47504. __ret; \
  47505. })
  47506. #endif
  47507. #ifdef __LITTLE_ENDIAN__
  47508. #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47509. float32_t __s0 = __p0; \
  47510. float32_t __s1 = __p1; \
  47511. float32x2_t __s2 = __p2; \
  47512. float32_t __ret; \
  47513. __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
  47514. __ret; \
  47515. })
  47516. #else
  47517. #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47518. float32_t __s0 = __p0; \
  47519. float32_t __s1 = __p1; \
  47520. float32x2_t __s2 = __p2; \
  47521. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  47522. float32_t __ret; \
  47523. __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \
  47524. __ret; \
  47525. })
  47526. #define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47527. float32_t __s0 = __p0; \
  47528. float32_t __s1 = __p1; \
  47529. float32x2_t __s2 = __p2; \
  47530. float32_t __ret; \
  47531. __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
  47532. __ret; \
  47533. })
  47534. #endif
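/* vfmad_lane_f64 / vfmas_lane_f32 are the scalar forms: they return
 * __p0 + __p1 * __p2[__p3], i.e. the multiplier is picked from lane __p3 of a 64-bit
 * vector. Illustrative sketch, not part of the header:
 *
 *   float32x2_t c = (float32x2_t) {10.0f, 20.0f};
 *   float32_t   r = vfmas_lane_f32(1.0f, 2.0f, c, 1);   // 1 + 2*20 = 41.0f
 */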
  47535. #ifdef __LITTLE_ENDIAN__
  47536. #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47537. float64x2_t __s0 = __p0; \
  47538. float64x2_t __s1 = __p1; \
  47539. float64x1_t __s2 = __p2; \
  47540. float64x2_t __ret; \
  47541. __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
  47542. __ret; \
  47543. })
  47544. #else
  47545. #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47546. float64x2_t __s0 = __p0; \
  47547. float64x2_t __s1 = __p1; \
  47548. float64x1_t __s2 = __p2; \
  47549. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47550. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  47551. float64x2_t __ret; \
  47552. __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
  47553. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47554. __ret; \
  47555. })
  47556. #define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47557. float64x2_t __s0 = __p0; \
  47558. float64x2_t __s1 = __p1; \
  47559. float64x1_t __s2 = __p2; \
  47560. float64x2_t __ret; \
  47561. __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
  47562. __ret; \
  47563. })
  47564. #endif
  47565. #ifdef __LITTLE_ENDIAN__
  47566. #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47567. float32x4_t __s0 = __p0; \
  47568. float32x4_t __s1 = __p1; \
  47569. float32x2_t __s2 = __p2; \
  47570. float32x4_t __ret; \
  47571. __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
  47572. __ret; \
  47573. })
  47574. #else
  47575. #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47576. float32x4_t __s0 = __p0; \
  47577. float32x4_t __s1 = __p1; \
  47578. float32x2_t __s2 = __p2; \
  47579. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  47580. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  47581. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  47582. float32x4_t __ret; \
  47583. __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
  47584. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  47585. __ret; \
  47586. })
  47587. #define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47588. float32x4_t __s0 = __p0; \
  47589. float32x4_t __s1 = __p1; \
  47590. float32x2_t __s2 = __p2; \
  47591. float32x4_t __ret; \
  47592. __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
  47593. __ret; \
  47594. })
  47595. #endif
  47596. #ifdef __LITTLE_ENDIAN__
  47597. #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47598. float64x1_t __s0 = __p0; \
  47599. float64x1_t __s1 = __p1; \
  47600. float64x1_t __s2 = __p2; \
  47601. float64x1_t __ret; \
  47602. __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
  47603. __ret; \
  47604. })
  47605. #else
  47606. #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47607. float64x1_t __s0 = __p0; \
  47608. float64x1_t __s1 = __p1; \
  47609. float64x1_t __s2 = __p2; \
  47610. float64x1_t __ret; \
  47611. __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
  47612. __ret; \
  47613. })
  47614. #define __noswap_vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47615. float64x1_t __s0 = __p0; \
  47616. float64x1_t __s1 = __p1; \
  47617. float64x1_t __s2 = __p2; \
  47618. float64x1_t __ret; \
  47619. __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
  47620. __ret; \
  47621. })
  47622. #endif
  47623. #ifdef __LITTLE_ENDIAN__
  47624. #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47625. float32x2_t __s0 = __p0; \
  47626. float32x2_t __s1 = __p1; \
  47627. float32x2_t __s2 = __p2; \
  47628. float32x2_t __ret; \
  47629. __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
  47630. __ret; \
  47631. })
  47632. #else
  47633. #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47634. float32x2_t __s0 = __p0; \
  47635. float32x2_t __s1 = __p1; \
  47636. float32x2_t __s2 = __p2; \
  47637. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47638. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  47639. float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  47640. float32x2_t __ret; \
  47641. __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
  47642. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47643. __ret; \
  47644. })
  47645. #define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47646. float32x2_t __s0 = __p0; \
  47647. float32x2_t __s1 = __p1; \
  47648. float32x2_t __s2 = __p2; \
  47649. float32x2_t __ret; \
  47650. __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
  47651. __ret; \
  47652. })
  47653. #endif
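/* The vfmaq_lane_* / vfma_lane_* forms multiply every lane of __p1 by lane __p3 of the
 * 64-bit vector __p2 and add __p0, again with a single rounding. Illustrative sketch,
 * not part of the header:
 *
 *   float32x4_t acc = (float32x4_t) {0.0f, 0.0f, 0.0f, 0.0f};
 *   float32x4_t x   = (float32x4_t) {1.0f, 2.0f, 3.0f, 4.0f};
 *   float32x2_t c   = (float32x2_t) {10.0f, 20.0f};
 *   float32x4_t r   = vfmaq_lane_f32(acc, x, c, 1);   // {20, 40, 60, 80}
 */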
  47654. #ifdef __LITTLE_ENDIAN__
  47655. #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47656. float64_t __s0 = __p0; \
  47657. float64_t __s1 = __p1; \
  47658. float64x2_t __s2 = __p2; \
  47659. float64_t __ret; \
  47660. __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
  47661. __ret; \
  47662. })
  47663. #else
  47664. #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47665. float64_t __s0 = __p0; \
  47666. float64_t __s1 = __p1; \
  47667. float64x2_t __s2 = __p2; \
  47668. float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  47669. float64_t __ret; \
  47670. __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \
  47671. __ret; \
  47672. })
  47673. #define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47674. float64_t __s0 = __p0; \
  47675. float64_t __s1 = __p1; \
  47676. float64x2_t __s2 = __p2; \
  47677. float64_t __ret; \
  47678. __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
  47679. __ret; \
  47680. })
  47681. #endif
  47682. #ifdef __LITTLE_ENDIAN__
  47683. #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47684. float32_t __s0 = __p0; \
  47685. float32_t __s1 = __p1; \
  47686. float32x4_t __s2 = __p2; \
  47687. float32_t __ret; \
  47688. __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
  47689. __ret; \
  47690. })
  47691. #else
  47692. #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47693. float32_t __s0 = __p0; \
  47694. float32_t __s1 = __p1; \
  47695. float32x4_t __s2 = __p2; \
  47696. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  47697. float32_t __ret; \
  47698. __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \
  47699. __ret; \
  47700. })
  47701. #define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47702. float32_t __s0 = __p0; \
  47703. float32_t __s1 = __p1; \
  47704. float32x4_t __s2 = __p2; \
  47705. float32_t __ret; \
  47706. __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
  47707. __ret; \
  47708. })
  47709. #endif
  47710. #ifdef __LITTLE_ENDIAN__
  47711. #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47712. float64x2_t __s0 = __p0; \
  47713. float64x2_t __s1 = __p1; \
  47714. float64x2_t __s2 = __p2; \
  47715. float64x2_t __ret; \
  47716. __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
  47717. __ret; \
  47718. })
  47719. #else
  47720. #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47721. float64x2_t __s0 = __p0; \
  47722. float64x2_t __s1 = __p1; \
  47723. float64x2_t __s2 = __p2; \
  47724. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47725. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  47726. float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  47727. float64x2_t __ret; \
  47728. __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
  47729. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47730. __ret; \
  47731. })
  47732. #define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47733. float64x2_t __s0 = __p0; \
  47734. float64x2_t __s1 = __p1; \
  47735. float64x2_t __s2 = __p2; \
  47736. float64x2_t __ret; \
  47737. __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
  47738. __ret; \
  47739. })
  47740. #endif
  47741. #ifdef __LITTLE_ENDIAN__
  47742. #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47743. float32x4_t __s0 = __p0; \
  47744. float32x4_t __s1 = __p1; \
  47745. float32x4_t __s2 = __p2; \
  47746. float32x4_t __ret; \
  47747. __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
  47748. __ret; \
  47749. })
  47750. #else
  47751. #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47752. float32x4_t __s0 = __p0; \
  47753. float32x4_t __s1 = __p1; \
  47754. float32x4_t __s2 = __p2; \
  47755. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  47756. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  47757. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  47758. float32x4_t __ret; \
  47759. __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
  47760. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  47761. __ret; \
  47762. })
  47763. #define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47764. float32x4_t __s0 = __p0; \
  47765. float32x4_t __s1 = __p1; \
  47766. float32x4_t __s2 = __p2; \
  47767. float32x4_t __ret; \
  47768. __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
  47769. __ret; \
  47770. })
  47771. #endif
  47772. #ifdef __LITTLE_ENDIAN__
  47773. #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47774. float64x1_t __s0 = __p0; \
  47775. float64x1_t __s1 = __p1; \
  47776. float64x2_t __s2 = __p2; \
  47777. float64x1_t __ret; \
  47778. __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
  47779. __ret; \
  47780. })
  47781. #else
  47782. #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47783. float64x1_t __s0 = __p0; \
  47784. float64x1_t __s1 = __p1; \
  47785. float64x2_t __s2 = __p2; \
  47786. float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  47787. float64x1_t __ret; \
  47788. __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
  47789. __ret; \
  47790. })
  47791. #define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
  47792. float64x1_t __s0 = __p0; \
  47793. float64x1_t __s1 = __p1; \
  47794. float64x2_t __s2 = __p2; \
  47795. float64x1_t __ret; \
  47796. __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
  47797. __ret; \
  47798. })
  47799. #endif
  47800. #ifdef __LITTLE_ENDIAN__
  47801. #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47802. float32x2_t __s0 = __p0; \
  47803. float32x2_t __s1 = __p1; \
  47804. float32x4_t __s2 = __p2; \
  47805. float32x2_t __ret; \
  47806. __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
  47807. __ret; \
  47808. })
  47809. #else
  47810. #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47811. float32x2_t __s0 = __p0; \
  47812. float32x2_t __s1 = __p1; \
  47813. float32x4_t __s2 = __p2; \
  47814. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  47815. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  47816. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  47817. float32x2_t __ret; \
  47818. __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
  47819. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  47820. __ret; \
  47821. })
  47822. #define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  47823. float32x2_t __s0 = __p0; \
  47824. float32x2_t __s1 = __p1; \
  47825. float32x4_t __s2 = __p2; \
  47826. float32x2_t __ret; \
  47827. __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
  47828. __ret; \
  47829. })
  47830. #endif
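/* The *_laneq_* variants above are identical except that lane __p3 of the multiplier is
 * taken from a 128-bit vector __p2 instead of a 64-bit one. Illustrative sketch, not
 * part of the header:
 *
 *   float32x4_t c = (float32x4_t) {1.0f, 2.0f, 3.0f, 4.0f};
 *   float32_t   r = vfmas_laneq_f32(0.5f, 2.0f, c, 3);   // 0.5 + 2*4 = 8.5f
 */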
  47831. #ifdef __LITTLE_ENDIAN__
  47832. __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
  47833. float64x2_t __ret;
  47834. __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
  47835. return __ret;
  47836. }
  47837. #else
  47838. __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
  47839. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  47840. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  47841. float64x2_t __ret;
  47842. __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
  47843. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  47844. return __ret;
  47845. }
  47846. #endif
  47847. #ifdef __LITTLE_ENDIAN__
  47848. __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  47849. float32x4_t __ret;
  47850. __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
  47851. return __ret;
  47852. }
  47853. #else
  47854. __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  47855. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  47856. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  47857. float32x4_t __ret;
  47858. __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
  47859. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  47860. return __ret;
  47861. }
  47862. #endif
  47863. #ifdef __LITTLE_ENDIAN__
  47864. __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  47865. float32x2_t __ret;
  47866. __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
  47867. return __ret;
  47868. }
  47869. #else
  47870. __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  47871. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  47872. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  47873. float32x2_t __ret;
  47874. __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
  47875. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  47876. return __ret;
  47877. }
  47878. #endif
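/* vfmaq_n_* / vfma_n_* splat the scalar __p2 and forward to the full vfma, so each lane
 * is __p0[i] + __p1[i] * __p2. Illustrative sketch, not part of the header:
 *
 *   float32x2_t acc = (float32x2_t) {1.0f, 2.0f};
 *   float32x2_t x   = (float32x2_t) {3.0f, 4.0f};
 *   float32x2_t r   = vfma_n_f32(acc, x, 2.0f);   // {7.0f, 10.0f}
 */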
  47879. #ifdef __LITTLE_ENDIAN__
  47880. __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  47881. float64x2_t __ret;
  47882. __ret = vfmaq_f64(__p0, -__p1, __p2);
  47883. return __ret;
  47884. }
  47885. #else
  47886. __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  47887. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  47888. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  47889. float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  47890. float64x2_t __ret;
  47891. __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
  47892. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  47893. return __ret;
  47894. }
  47895. #endif
  47896. #ifdef __LITTLE_ENDIAN__
  47897. __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  47898. float64x1_t __ret;
  47899. __ret = vfma_f64(__p0, -__p1, __p2);
  47900. return __ret;
  47901. }
  47902. #else
  47903. __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  47904. float64x1_t __ret;
  47905. __ret = __noswap_vfma_f64(__p0, -__p1, __p2);
  47906. return __ret;
  47907. }
  47908. #endif
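/* vfmsq_f64 / vfms_f64 are fused multiply-subtract, implemented as vfma with the second
 * operand negated: each lane is __p0 - __p1 * __p2, still with one rounding.
 * Illustrative sketch, not part of the header:
 *
 *   float64x2_t r = vfmsq_f64((float64x2_t) {10.0, 10.0},
 *                             (float64x2_t) {2.0, 3.0},
 *                             (float64x2_t) {1.0, 2.0});   // r = {8.0, 4.0}
 */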
  47909. #ifdef __LITTLE_ENDIAN__
  47910. #define vfmsd_lane_f64(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
  47911. float64_t __s0_100 = __p0_100; \
  47912. float64_t __s1_100 = __p1_100; \
  47913. float64x1_t __s2_100 = __p2_100; \
  47914. float64_t __ret_100; \
  47915. __ret_100 = vfmad_lane_f64(__s0_100, -__s1_100, __s2_100, __p3_100); \
  47916. __ret_100; \
  47917. })
  47918. #else
  47919. #define vfmsd_lane_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
  47920. float64_t __s0_101 = __p0_101; \
  47921. float64_t __s1_101 = __p1_101; \
  47922. float64x1_t __s2_101 = __p2_101; \
  47923. float64_t __ret_101; \
  47924. __ret_101 = __noswap_vfmad_lane_f64(__s0_101, -__s1_101, __s2_101, __p3_101); \
  47925. __ret_101; \
  47926. })
  47927. #endif
  47928. #ifdef __LITTLE_ENDIAN__
  47929. #define vfmss_lane_f32(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
  47930. float32_t __s0_102 = __p0_102; \
  47931. float32_t __s1_102 = __p1_102; \
  47932. float32x2_t __s2_102 = __p2_102; \
  47933. float32_t __ret_102; \
  47934. __ret_102 = vfmas_lane_f32(__s0_102, -__s1_102, __s2_102, __p3_102); \
  47935. __ret_102; \
  47936. })
  47937. #else
  47938. #define vfmss_lane_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
  47939. float32_t __s0_103 = __p0_103; \
  47940. float32_t __s1_103 = __p1_103; \
  47941. float32x2_t __s2_103 = __p2_103; \
  47942. float32x2_t __rev2_103; __rev2_103 = __builtin_shufflevector(__s2_103, __s2_103, 1, 0); \
  47943. float32_t __ret_103; \
  47944. __ret_103 = __noswap_vfmas_lane_f32(__s0_103, -__s1_103, __rev2_103, __p3_103); \
  47945. __ret_103; \
  47946. })
  47947. #endif
  47948. #ifdef __LITTLE_ENDIAN__
  47949. #define vfmsq_lane_f64(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
  47950. float64x2_t __s0_104 = __p0_104; \
  47951. float64x2_t __s1_104 = __p1_104; \
  47952. float64x1_t __s2_104 = __p2_104; \
  47953. float64x2_t __ret_104; \
  47954. __ret_104 = vfmaq_lane_f64(__s0_104, -__s1_104, __s2_104, __p3_104); \
  47955. __ret_104; \
  47956. })
  47957. #else
  47958. #define vfmsq_lane_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
  47959. float64x2_t __s0_105 = __p0_105; \
  47960. float64x2_t __s1_105 = __p1_105; \
  47961. float64x1_t __s2_105 = __p2_105; \
  47962. float64x2_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 1, 0); \
  47963. float64x2_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 1, 0); \
  47964. float64x2_t __ret_105; \
  47965. __ret_105 = __noswap_vfmaq_lane_f64(__rev0_105, -__rev1_105, __s2_105, __p3_105); \
  47966. __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 1, 0); \
  47967. __ret_105; \
  47968. })
  47969. #endif
  47970. #ifdef __LITTLE_ENDIAN__
  47971. #define vfmsq_lane_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
  47972. float32x4_t __s0_106 = __p0_106; \
  47973. float32x4_t __s1_106 = __p1_106; \
  47974. float32x2_t __s2_106 = __p2_106; \
  47975. float32x4_t __ret_106; \
  47976. __ret_106 = vfmaq_lane_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \
  47977. __ret_106; \
  47978. })
  47979. #else
  47980. #define vfmsq_lane_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
  47981. float32x4_t __s0_107 = __p0_107; \
  47982. float32x4_t __s1_107 = __p1_107; \
  47983. float32x2_t __s2_107 = __p2_107; \
  47984. float32x4_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 3, 2, 1, 0); \
  47985. float32x4_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 3, 2, 1, 0); \
  47986. float32x2_t __rev2_107; __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 1, 0); \
  47987. float32x4_t __ret_107; \
  47988. __ret_107 = __noswap_vfmaq_lane_f32(__rev0_107, -__rev1_107, __rev2_107, __p3_107); \
  47989. __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 3, 2, 1, 0); \
  47990. __ret_107; \
  47991. })
  47992. #endif
  47993. #ifdef __LITTLE_ENDIAN__
  47994. #define vfms_lane_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
  47995. float64x1_t __s0_108 = __p0_108; \
  47996. float64x1_t __s1_108 = __p1_108; \
  47997. float64x1_t __s2_108 = __p2_108; \
  47998. float64x1_t __ret_108; \
  47999. __ret_108 = vfma_lane_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \
  48000. __ret_108; \
  48001. })
  48002. #else
  48003. #define vfms_lane_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
  48004. float64x1_t __s0_109 = __p0_109; \
  48005. float64x1_t __s1_109 = __p1_109; \
  48006. float64x1_t __s2_109 = __p2_109; \
  48007. float64x1_t __ret_109; \
  48008. __ret_109 = __noswap_vfma_lane_f64(__s0_109, -__s1_109, __s2_109, __p3_109); \
  48009. __ret_109; \
  48010. })
  48011. #endif
  48012. #ifdef __LITTLE_ENDIAN__
  48013. #define vfms_lane_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
  48014. float32x2_t __s0_110 = __p0_110; \
  48015. float32x2_t __s1_110 = __p1_110; \
  48016. float32x2_t __s2_110 = __p2_110; \
  48017. float32x2_t __ret_110; \
  48018. __ret_110 = vfma_lane_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \
  48019. __ret_110; \
  48020. })
  48021. #else
  48022. #define vfms_lane_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
  48023. float32x2_t __s0_111 = __p0_111; \
  48024. float32x2_t __s1_111 = __p1_111; \
  48025. float32x2_t __s2_111 = __p2_111; \
  48026. float32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
  48027. float32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
  48028. float32x2_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
  48029. float32x2_t __ret_111; \
  48030. __ret_111 = __noswap_vfma_lane_f32(__rev0_111, -__rev1_111, __rev2_111, __p3_111); \
  48031. __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
  48032. __ret_111; \
  48033. })
  48034. #endif
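/* Usage sketch for the vector "_lane" forms (illustrative only). They
 * broadcast one element of a 64-bit vector as the multiplier, so
 * vfmsq_lane_f32(acc, a, v, 1) computes, per lane, acc - a * v[1]:
 *
 *   float32x4_t acc = vdupq_n_f32(1.0f);
 *   float32x4_t a   = vdupq_n_f32(2.0f);
 *   float32x2_t v   = (float32x2_t) {5.0f, 3.0f};
 *   float32x4_t r   = vfmsq_lane_f32(acc, a, v, 1);   // each lane: 1 - 2*3 = -5
 */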
  48035. #ifdef __LITTLE_ENDIAN__
  48036. #define vfmsd_laneq_f64(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
  48037. float64_t __s0_112 = __p0_112; \
  48038. float64_t __s1_112 = __p1_112; \
  48039. float64x2_t __s2_112 = __p2_112; \
  48040. float64_t __ret_112; \
  48041. __ret_112 = vfmad_laneq_f64(__s0_112, -__s1_112, __s2_112, __p3_112); \
  48042. __ret_112; \
  48043. })
  48044. #else
  48045. #define vfmsd_laneq_f64(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
  48046. float64_t __s0_113 = __p0_113; \
  48047. float64_t __s1_113 = __p1_113; \
  48048. float64x2_t __s2_113 = __p2_113; \
  48049. float64x2_t __rev2_113; __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 1, 0); \
  48050. float64_t __ret_113; \
  48051. __ret_113 = __noswap_vfmad_laneq_f64(__s0_113, -__s1_113, __rev2_113, __p3_113); \
  48052. __ret_113; \
  48053. })
  48054. #endif
  48055. #ifdef __LITTLE_ENDIAN__
  48056. #define vfmss_laneq_f32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
  48057. float32_t __s0_114 = __p0_114; \
  48058. float32_t __s1_114 = __p1_114; \
  48059. float32x4_t __s2_114 = __p2_114; \
  48060. float32_t __ret_114; \
  48061. __ret_114 = vfmas_laneq_f32(__s0_114, -__s1_114, __s2_114, __p3_114); \
  48062. __ret_114; \
  48063. })
  48064. #else
  48065. #define vfmss_laneq_f32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
  48066. float32_t __s0_115 = __p0_115; \
  48067. float32_t __s1_115 = __p1_115; \
  48068. float32x4_t __s2_115 = __p2_115; \
  48069. float32x4_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 3, 2, 1, 0); \
  48070. float32_t __ret_115; \
  48071. __ret_115 = __noswap_vfmas_laneq_f32(__s0_115, -__s1_115, __rev2_115, __p3_115); \
  48072. __ret_115; \
  48073. })
  48074. #endif
  48075. #ifdef __LITTLE_ENDIAN__
  48076. #define vfmsq_laneq_f64(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
  48077. float64x2_t __s0_116 = __p0_116; \
  48078. float64x2_t __s1_116 = __p1_116; \
  48079. float64x2_t __s2_116 = __p2_116; \
  48080. float64x2_t __ret_116; \
  48081. __ret_116 = vfmaq_laneq_f64(__s0_116, -__s1_116, __s2_116, __p3_116); \
  48082. __ret_116; \
  48083. })
  48084. #else
  48085. #define vfmsq_laneq_f64(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
  48086. float64x2_t __s0_117 = __p0_117; \
  48087. float64x2_t __s1_117 = __p1_117; \
  48088. float64x2_t __s2_117 = __p2_117; \
  48089. float64x2_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 1, 0); \
  48090. float64x2_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 1, 0); \
  48091. float64x2_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 1, 0); \
  48092. float64x2_t __ret_117; \
  48093. __ret_117 = __noswap_vfmaq_laneq_f64(__rev0_117, -__rev1_117, __rev2_117, __p3_117); \
  48094. __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 1, 0); \
  48095. __ret_117; \
  48096. })
  48097. #endif
  48098. #ifdef __LITTLE_ENDIAN__
  48099. #define vfmsq_laneq_f32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
  48100. float32x4_t __s0_118 = __p0_118; \
  48101. float32x4_t __s1_118 = __p1_118; \
  48102. float32x4_t __s2_118 = __p2_118; \
  48103. float32x4_t __ret_118; \
  48104. __ret_118 = vfmaq_laneq_f32(__s0_118, -__s1_118, __s2_118, __p3_118); \
  48105. __ret_118; \
  48106. })
  48107. #else
  48108. #define vfmsq_laneq_f32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
  48109. float32x4_t __s0_119 = __p0_119; \
  48110. float32x4_t __s1_119 = __p1_119; \
  48111. float32x4_t __s2_119 = __p2_119; \
  48112. float32x4_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 3, 2, 1, 0); \
  48113. float32x4_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 3, 2, 1, 0); \
  48114. float32x4_t __rev2_119; __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 3, 2, 1, 0); \
  48115. float32x4_t __ret_119; \
  48116. __ret_119 = __noswap_vfmaq_laneq_f32(__rev0_119, -__rev1_119, __rev2_119, __p3_119); \
  48117. __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 3, 2, 1, 0); \
  48118. __ret_119; \
  48119. })
  48120. #endif
  48121. #ifdef __LITTLE_ENDIAN__
  48122. #define vfms_laneq_f64(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \
  48123. float64x1_t __s0_120 = __p0_120; \
  48124. float64x1_t __s1_120 = __p1_120; \
  48125. float64x2_t __s2_120 = __p2_120; \
  48126. float64x1_t __ret_120; \
  48127. __ret_120 = vfma_laneq_f64(__s0_120, -__s1_120, __s2_120, __p3_120); \
  48128. __ret_120; \
  48129. })
  48130. #else
  48131. #define vfms_laneq_f64(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \
  48132. float64x1_t __s0_121 = __p0_121; \
  48133. float64x1_t __s1_121 = __p1_121; \
  48134. float64x2_t __s2_121 = __p2_121; \
  48135. float64x2_t __rev2_121; __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 1, 0); \
  48136. float64x1_t __ret_121; \
  48137. __ret_121 = __noswap_vfma_laneq_f64(__s0_121, -__s1_121, __rev2_121, __p3_121); \
  48138. __ret_121; \
  48139. })
  48140. #endif
  48141. #ifdef __LITTLE_ENDIAN__
  48142. #define vfms_laneq_f32(__p0_122, __p1_122, __p2_122, __p3_122) __extension__ ({ \
  48143. float32x2_t __s0_122 = __p0_122; \
  48144. float32x2_t __s1_122 = __p1_122; \
  48145. float32x4_t __s2_122 = __p2_122; \
  48146. float32x2_t __ret_122; \
  48147. __ret_122 = vfma_laneq_f32(__s0_122, -__s1_122, __s2_122, __p3_122); \
  48148. __ret_122; \
  48149. })
  48150. #else
  48151. #define vfms_laneq_f32(__p0_123, __p1_123, __p2_123, __p3_123) __extension__ ({ \
  48152. float32x2_t __s0_123 = __p0_123; \
  48153. float32x2_t __s1_123 = __p1_123; \
  48154. float32x4_t __s2_123 = __p2_123; \
  48155. float32x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \
  48156. float32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \
  48157. float32x4_t __rev2_123; __rev2_123 = __builtin_shufflevector(__s2_123, __s2_123, 3, 2, 1, 0); \
  48158. float32x2_t __ret_123; \
  48159. __ret_123 = __noswap_vfma_laneq_f32(__rev0_123, -__rev1_123, __rev2_123, __p3_123); \
  48160. __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \
  48161. __ret_123; \
  48162. })
  48163. #endif
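/* Usage sketch for the "_laneq" forms (illustrative only). They behave like
 * the "_lane" forms except that the lane is taken from a 128-bit vector, so
 * the index may range over four f32 lanes (or two f64 lanes):
 *
 *   float32x4_t table = vdupq_n_f32(3.0f);
 *   float32x2_t r = vfms_laneq_f32(vdup_n_f32(1.0f), vdup_n_f32(2.0f), table, 3);
 *   // each lane: 1 - 2*table[3] = -5
 */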
  48164. #ifdef __LITTLE_ENDIAN__
  48165. __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
  48166. float64x2_t __ret;
  48167. __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
  48168. return __ret;
  48169. }
  48170. #else
  48171. __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
  48172. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  48173. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  48174. float64x2_t __ret;
  48175. __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
  48176. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  48177. return __ret;
  48178. }
  48179. #endif
  48180. #ifdef __LITTLE_ENDIAN__
  48181. __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  48182. float32x4_t __ret;
  48183. __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
  48184. return __ret;
  48185. }
  48186. #else
  48187. __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
  48188. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  48189. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  48190. float32x4_t __ret;
  48191. __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
  48192. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  48193. return __ret;
  48194. }
  48195. #endif
  48196. #ifdef __LITTLE_ENDIAN__
  48197. __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  48198. float32x2_t __ret;
  48199. __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
  48200. return __ret;
  48201. }
  48202. #else
  48203. __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
  48204. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  48205. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  48206. float32x2_t __ret;
  48207. __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
  48208. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  48209. return __ret;
  48210. }
  48211. #endif
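/* Usage sketch for the "_n" forms (illustrative only). They splat a scalar
 * multiplier across every lane, as the (float64x2_t){__p2, __p2} construction
 * above shows:
 *
 *   float32x2_t acc = vdup_n_f32(8.0f);
 *   float32x2_t x   = vdup_n_f32(3.0f);
 *   float32x2_t r   = vfms_n_f32(acc, x, 2.0f);   // each lane: 8 - 3*2 = 2
 */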
  48212. #ifdef __LITTLE_ENDIAN__
  48213. __ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
  48214. poly64x1_t __ret;
  48215. __ret = __builtin_shufflevector(__p0, __p0, 1);
  48216. return __ret;
  48217. }
  48218. #else
  48219. __ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
  48220. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  48221. poly64x1_t __ret;
  48222. __ret = __builtin_shufflevector(__rev0, __rev0, 1);
  48223. return __ret;
  48224. }
  48225. __ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
  48226. poly64x1_t __ret;
  48227. __ret = __builtin_shufflevector(__p0, __p0, 1);
  48228. return __ret;
  48229. }
  48230. #endif
  48231. #ifdef __LITTLE_ENDIAN__
  48232. __ai float64x1_t vget_high_f64(float64x2_t __p0) {
  48233. float64x1_t __ret;
  48234. __ret = __builtin_shufflevector(__p0, __p0, 1);
  48235. return __ret;
  48236. }
  48237. #else
  48238. __ai float64x1_t vget_high_f64(float64x2_t __p0) {
  48239. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  48240. float64x1_t __ret;
  48241. __ret = __builtin_shufflevector(__rev0, __rev0, 1);
  48242. return __ret;
  48243. }
  48244. #endif
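/* Illustrative sketch (not part of the generated header). vget_high_* returns
 * the upper half of a 128-bit vector; for 64-bit element types that is simply
 * lane 1 as a one-element vector:
 *
 *   float64x2_t v  = (float64x2_t) {1.0, 2.0};
 *   float64x1_t hi = vget_high_f64(v);            // contains 2.0
 */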
  48245. #ifdef __LITTLE_ENDIAN__
  48246. #define vget_lane_p64(__p0, __p1) __extension__ ({ \
  48247. poly64x1_t __s0 = __p0; \
  48248. poly64_t __ret; \
  48249. __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  48250. __ret; \
  48251. })
  48252. #else
  48253. #define vget_lane_p64(__p0, __p1) __extension__ ({ \
  48254. poly64x1_t __s0 = __p0; \
  48255. poly64_t __ret; \
  48256. __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  48257. __ret; \
  48258. })
  48259. #define __noswap_vget_lane_p64(__p0, __p1) __extension__ ({ \
  48260. poly64x1_t __s0 = __p0; \
  48261. poly64_t __ret; \
  48262. __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
  48263. __ret; \
  48264. })
  48265. #endif
  48266. #ifdef __LITTLE_ENDIAN__
  48267. #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
  48268. poly64x2_t __s0 = __p0; \
  48269. poly64_t __ret; \
  48270. __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
  48271. __ret; \
  48272. })
  48273. #else
  48274. #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
  48275. poly64x2_t __s0 = __p0; \
  48276. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  48277. poly64_t __ret; \
  48278. __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
  48279. __ret; \
  48280. })
  48281. #define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
  48282. poly64x2_t __s0 = __p0; \
  48283. poly64_t __ret; \
  48284. __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
  48285. __ret; \
  48286. })
  48287. #endif
  48288. #ifdef __LITTLE_ENDIAN__
  48289. #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
  48290. float64x2_t __s0 = __p0; \
  48291. float64_t __ret; \
  48292. __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
  48293. __ret; \
  48294. })
  48295. #else
  48296. #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
  48297. float64x2_t __s0 = __p0; \
  48298. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  48299. float64_t __ret; \
  48300. __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \
  48301. __ret; \
  48302. })
  48303. #define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
  48304. float64x2_t __s0 = __p0; \
  48305. float64_t __ret; \
  48306. __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
  48307. __ret; \
  48308. })
  48309. #endif
  48310. #ifdef __LITTLE_ENDIAN__
  48311. #define vget_lane_f64(__p0, __p1) __extension__ ({ \
  48312. float64x1_t __s0 = __p0; \
  48313. float64_t __ret; \
  48314. __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
  48315. __ret; \
  48316. })
  48317. #else
  48318. #define vget_lane_f64(__p0, __p1) __extension__ ({ \
  48319. float64x1_t __s0 = __p0; \
  48320. float64_t __ret; \
  48321. __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
  48322. __ret; \
  48323. })
  48324. #define __noswap_vget_lane_f64(__p0, __p1) __extension__ ({ \
  48325. float64x1_t __s0 = __p0; \
  48326. float64_t __ret; \
  48327. __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
  48328. __ret; \
  48329. })
  48330. #endif
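/* Illustrative sketch (not part of the generated header). vget_lane_* and
 * vgetq_lane_* extract a single element as a scalar; the lane index must be a
 * compile-time constant:
 *
 *   float64x2_t v = (float64x2_t) {1.0, 2.0};
 *   float64_t hi  = vgetq_lane_f64(v, 1);         // 2.0
 *   float64_t lo  = vgetq_lane_f64(v, 0);         // 1.0
 */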
  48331. #ifdef __LITTLE_ENDIAN__
  48332. __ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
  48333. poly64x1_t __ret;
  48334. __ret = __builtin_shufflevector(__p0, __p0, 0);
  48335. return __ret;
  48336. }
  48337. #else
  48338. __ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
  48339. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  48340. poly64x1_t __ret;
  48341. __ret = __builtin_shufflevector(__rev0, __rev0, 0);
  48342. return __ret;
  48343. }
  48344. #endif
  48345. #ifdef __LITTLE_ENDIAN__
  48346. __ai float64x1_t vget_low_f64(float64x2_t __p0) {
  48347. float64x1_t __ret;
  48348. __ret = __builtin_shufflevector(__p0, __p0, 0);
  48349. return __ret;
  48350. }
  48351. #else
  48352. __ai float64x1_t vget_low_f64(float64x2_t __p0) {
  48353. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  48354. float64x1_t __ret;
  48355. __ret = __builtin_shufflevector(__rev0, __rev0, 0);
  48356. return __ret;
  48357. }
  48358. #endif
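/* Illustrative sketch (not part of the generated header). Together with
 * vget_high_*, vget_low_* splits a q-register into its two d-register halves:
 *
 *   float64x2_t v  = (float64x2_t) {1.0, 2.0};
 *   float64x1_t lo = vget_low_f64(v);     // lane 0: 1.0
 *   float64x1_t hi = vget_high_f64(v);    // lane 1: 2.0
 */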
  48359. #ifdef __LITTLE_ENDIAN__
  48360. #define vld1_p64(__p0) __extension__ ({ \
  48361. poly64x1_t __ret; \
  48362. __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
  48363. __ret; \
  48364. })
  48365. #else
  48366. #define vld1_p64(__p0) __extension__ ({ \
  48367. poly64x1_t __ret; \
  48368. __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
  48369. __ret; \
  48370. })
  48371. #endif
  48372. #ifdef __LITTLE_ENDIAN__
  48373. #define vld1q_p64(__p0) __extension__ ({ \
  48374. poly64x2_t __ret; \
  48375. __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
  48376. __ret; \
  48377. })
  48378. #else
  48379. #define vld1q_p64(__p0) __extension__ ({ \
  48380. poly64x2_t __ret; \
  48381. __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
  48382. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  48383. __ret; \
  48384. })
  48385. #endif
  48386. #ifdef __LITTLE_ENDIAN__
  48387. #define vld1q_f64(__p0) __extension__ ({ \
  48388. float64x2_t __ret; \
  48389. __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
  48390. __ret; \
  48391. })
  48392. #else
  48393. #define vld1q_f64(__p0) __extension__ ({ \
  48394. float64x2_t __ret; \
  48395. __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
  48396. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  48397. __ret; \
  48398. })
  48399. #endif
  48400. #ifdef __LITTLE_ENDIAN__
  48401. #define vld1_f64(__p0) __extension__ ({ \
  48402. float64x1_t __ret; \
  48403. __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
  48404. __ret; \
  48405. })
  48406. #else
  48407. #define vld1_f64(__p0) __extension__ ({ \
  48408. float64x1_t __ret; \
  48409. __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
  48410. __ret; \
  48411. })
  48412. #endif
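/* Illustrative sketch (not part of the generated header; the buffer name is
 * hypothetical). vld1 / vld1q load contiguous elements starting at the given
 * pointer:
 *
 *   double buf[3] = {1.0, 2.0, 3.0};
 *   float64x2_t v2 = vld1q_f64(buf);       // lanes {1.0, 2.0}
 *   float64x1_t v1 = vld1_f64(buf + 2);    // lane  {3.0}
 */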
  48413. #ifdef __LITTLE_ENDIAN__
  48414. #define vld1_dup_p64(__p0) __extension__ ({ \
  48415. poly64x1_t __ret; \
  48416. __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
  48417. __ret; \
  48418. })
  48419. #else
  48420. #define vld1_dup_p64(__p0) __extension__ ({ \
  48421. poly64x1_t __ret; \
  48422. __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
  48423. __ret; \
  48424. })
  48425. #endif
  48426. #ifdef __LITTLE_ENDIAN__
  48427. #define vld1q_dup_p64(__p0) __extension__ ({ \
  48428. poly64x2_t __ret; \
  48429. __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
  48430. __ret; \
  48431. })
  48432. #else
  48433. #define vld1q_dup_p64(__p0) __extension__ ({ \
  48434. poly64x2_t __ret; \
  48435. __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
  48436. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  48437. __ret; \
  48438. })
  48439. #endif
  48440. #ifdef __LITTLE_ENDIAN__
  48441. #define vld1q_dup_f64(__p0) __extension__ ({ \
  48442. float64x2_t __ret; \
  48443. __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
  48444. __ret; \
  48445. })
  48446. #else
  48447. #define vld1q_dup_f64(__p0) __extension__ ({ \
  48448. float64x2_t __ret; \
  48449. __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
  48450. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  48451. __ret; \
  48452. })
  48453. #endif
  48454. #ifdef __LITTLE_ENDIAN__
  48455. #define vld1_dup_f64(__p0) __extension__ ({ \
  48456. float64x1_t __ret; \
  48457. __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
  48458. __ret; \
  48459. })
  48460. #else
  48461. #define vld1_dup_f64(__p0) __extension__ ({ \
  48462. float64x1_t __ret; \
  48463. __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
  48464. __ret; \
  48465. })
  48466. #endif
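/* Illustrative sketch (not part of the generated header). The "_dup" loads
 * read a single element and replicate it into every lane:
 *
 *   double x = 7.0;
 *   float64x2_t v = vld1q_dup_f64(&x);     // both lanes 7.0
 */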
  48467. #ifdef __LITTLE_ENDIAN__
  48468. #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  48469. poly64x1_t __s1 = __p1; \
  48470. poly64x1_t __ret; \
  48471. __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
  48472. __ret; \
  48473. })
  48474. #else
  48475. #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  48476. poly64x1_t __s1 = __p1; \
  48477. poly64x1_t __ret; \
  48478. __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
  48479. __ret; \
  48480. })
  48481. #endif
  48482. #ifdef __LITTLE_ENDIAN__
  48483. #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  48484. poly64x2_t __s1 = __p1; \
  48485. poly64x2_t __ret; \
  48486. __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
  48487. __ret; \
  48488. })
  48489. #else
  48490. #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  48491. poly64x2_t __s1 = __p1; \
  48492. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  48493. poly64x2_t __ret; \
  48494. __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
  48495. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  48496. __ret; \
  48497. })
  48498. #endif
  48499. #ifdef __LITTLE_ENDIAN__
  48500. #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  48501. float64x2_t __s1 = __p1; \
  48502. float64x2_t __ret; \
  48503. __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
  48504. __ret; \
  48505. })
  48506. #else
  48507. #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  48508. float64x2_t __s1 = __p1; \
  48509. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  48510. float64x2_t __ret; \
  48511. __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
  48512. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  48513. __ret; \
  48514. })
  48515. #endif
  48516. #ifdef __LITTLE_ENDIAN__
  48517. #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  48518. float64x1_t __s1 = __p1; \
  48519. float64x1_t __ret; \
  48520. __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
  48521. __ret; \
  48522. })
  48523. #else
  48524. #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  48525. float64x1_t __s1 = __p1; \
  48526. float64x1_t __ret; \
  48527. __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
  48528. __ret; \
  48529. })
  48530. #endif
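/* Illustrative sketch (not part of the generated header). The "_lane" loads
 * replace one lane of an existing vector with an element read from memory,
 * leaving the other lanes untouched; the lane index must be constant:
 *
 *   double x = 9.0;
 *   float64x2_t v = (float64x2_t) {1.0, 2.0};
 *   v = vld1q_lane_f64(&x, v, 0);          // lanes now {9.0, 2.0}
 */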
  48531. #ifdef __LITTLE_ENDIAN__
  48532. #define vld1_p8_x2(__p0) __extension__ ({ \
  48533. poly8x8x2_t __ret; \
  48534. __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
  48535. __ret; \
  48536. })
  48537. #else
  48538. #define vld1_p8_x2(__p0) __extension__ ({ \
  48539. poly8x8x2_t __ret; \
  48540. __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
  48541. \
  48542. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  48543. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  48544. __ret; \
  48545. })
  48546. #endif
  48547. #ifdef __LITTLE_ENDIAN__
  48548. #define vld1_p64_x2(__p0) __extension__ ({ \
  48549. poly64x1x2_t __ret; \
  48550. __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
  48551. __ret; \
  48552. })
  48553. #else
  48554. #define vld1_p64_x2(__p0) __extension__ ({ \
  48555. poly64x1x2_t __ret; \
  48556. __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
  48557. __ret; \
  48558. })
  48559. #endif
  48560. #ifdef __LITTLE_ENDIAN__
  48561. #define vld1_p16_x2(__p0) __extension__ ({ \
  48562. poly16x4x2_t __ret; \
  48563. __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
  48564. __ret; \
  48565. })
  48566. #else
  48567. #define vld1_p16_x2(__p0) __extension__ ({ \
  48568. poly16x4x2_t __ret; \
  48569. __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
  48570. \
  48571. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  48572. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  48573. __ret; \
  48574. })
  48575. #endif
  48576. #ifdef __LITTLE_ENDIAN__
  48577. #define vld1q_p8_x2(__p0) __extension__ ({ \
  48578. poly8x16x2_t __ret; \
  48579. __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
  48580. __ret; \
  48581. })
  48582. #else
  48583. #define vld1q_p8_x2(__p0) __extension__ ({ \
  48584. poly8x16x2_t __ret; \
  48585. __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
  48586. \
  48587. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  48588. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  48589. __ret; \
  48590. })
  48591. #endif
  48592. #ifdef __LITTLE_ENDIAN__
  48593. #define vld1q_p64_x2(__p0) __extension__ ({ \
  48594. poly64x2x2_t __ret; \
  48595. __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
  48596. __ret; \
  48597. })
  48598. #else
  48599. #define vld1q_p64_x2(__p0) __extension__ ({ \
  48600. poly64x2x2_t __ret; \
  48601. __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
  48602. \
  48603. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  48604. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  48605. __ret; \
  48606. })
  48607. #endif
  48608. #ifdef __LITTLE_ENDIAN__
  48609. #define vld1q_p16_x2(__p0) __extension__ ({ \
  48610. poly16x8x2_t __ret; \
  48611. __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
  48612. __ret; \
  48613. })
  48614. #else
  48615. #define vld1q_p16_x2(__p0) __extension__ ({ \
  48616. poly16x8x2_t __ret; \
  48617. __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
  48618. \
  48619. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  48620. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  48621. __ret; \
  48622. })
  48623. #endif
  48624. #ifdef __LITTLE_ENDIAN__
  48625. #define vld1q_u8_x2(__p0) __extension__ ({ \
  48626. uint8x16x2_t __ret; \
  48627. __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
  48628. __ret; \
  48629. })
  48630. #else
  48631. #define vld1q_u8_x2(__p0) __extension__ ({ \
  48632. uint8x16x2_t __ret; \
  48633. __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
  48634. \
  48635. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  48636. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  48637. __ret; \
  48638. })
  48639. #endif
  48640. #ifdef __LITTLE_ENDIAN__
  48641. #define vld1q_u32_x2(__p0) __extension__ ({ \
  48642. uint32x4x2_t __ret; \
  48643. __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
  48644. __ret; \
  48645. })
  48646. #else
  48647. #define vld1q_u32_x2(__p0) __extension__ ({ \
  48648. uint32x4x2_t __ret; \
  48649. __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
  48650. \
  48651. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  48652. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  48653. __ret; \
  48654. })
  48655. #endif
  48656. #ifdef __LITTLE_ENDIAN__
  48657. #define vld1q_u64_x2(__p0) __extension__ ({ \
  48658. uint64x2x2_t __ret; \
  48659. __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
  48660. __ret; \
  48661. })
  48662. #else
  48663. #define vld1q_u64_x2(__p0) __extension__ ({ \
  48664. uint64x2x2_t __ret; \
  48665. __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
  48666. \
  48667. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  48668. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  48669. __ret; \
  48670. })
  48671. #endif
  48672. #ifdef __LITTLE_ENDIAN__
  48673. #define vld1q_u16_x2(__p0) __extension__ ({ \
  48674. uint16x8x2_t __ret; \
  48675. __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
  48676. __ret; \
  48677. })
  48678. #else
  48679. #define vld1q_u16_x2(__p0) __extension__ ({ \
  48680. uint16x8x2_t __ret; \
  48681. __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
  48682. \
  48683. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  48684. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  48685. __ret; \
  48686. })
  48687. #endif
  48688. #ifdef __LITTLE_ENDIAN__
  48689. #define vld1q_s8_x2(__p0) __extension__ ({ \
  48690. int8x16x2_t __ret; \
  48691. __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
  48692. __ret; \
  48693. })
  48694. #else
  48695. #define vld1q_s8_x2(__p0) __extension__ ({ \
  48696. int8x16x2_t __ret; \
  48697. __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
  48698. \
  48699. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  48700. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  48701. __ret; \
  48702. })
  48703. #endif
  48704. #ifdef __LITTLE_ENDIAN__
  48705. #define vld1q_f64_x2(__p0) __extension__ ({ \
  48706. float64x2x2_t __ret; \
  48707. __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
  48708. __ret; \
  48709. })
  48710. #else
  48711. #define vld1q_f64_x2(__p0) __extension__ ({ \
  48712. float64x2x2_t __ret; \
  48713. __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
  48714. \
  48715. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  48716. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  48717. __ret; \
  48718. })
  48719. #endif
  48720. #ifdef __LITTLE_ENDIAN__
  48721. #define vld1q_f32_x2(__p0) __extension__ ({ \
  48722. float32x4x2_t __ret; \
  48723. __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
  48724. __ret; \
  48725. })
  48726. #else
  48727. #define vld1q_f32_x2(__p0) __extension__ ({ \
  48728. float32x4x2_t __ret; \
  48729. __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
  48730. \
  48731. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  48732. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  48733. __ret; \
  48734. })
  48735. #endif
  48736. #ifdef __LITTLE_ENDIAN__
  48737. #define vld1q_f16_x2(__p0) __extension__ ({ \
  48738. float16x8x2_t __ret; \
  48739. __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
  48740. __ret; \
  48741. })
  48742. #else
  48743. #define vld1q_f16_x2(__p0) __extension__ ({ \
  48744. float16x8x2_t __ret; \
  48745. __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
  48746. \
  48747. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  48748. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  48749. __ret; \
  48750. })
  48751. #endif
  48752. #ifdef __LITTLE_ENDIAN__
  48753. #define vld1q_s32_x2(__p0) __extension__ ({ \
  48754. int32x4x2_t __ret; \
  48755. __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
  48756. __ret; \
  48757. })
  48758. #else
  48759. #define vld1q_s32_x2(__p0) __extension__ ({ \
  48760. int32x4x2_t __ret; \
  48761. __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
  48762. \
  48763. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  48764. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  48765. __ret; \
  48766. })
  48767. #endif
  48768. #ifdef __LITTLE_ENDIAN__
  48769. #define vld1q_s64_x2(__p0) __extension__ ({ \
  48770. int64x2x2_t __ret; \
  48771. __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
  48772. __ret; \
  48773. })
  48774. #else
  48775. #define vld1q_s64_x2(__p0) __extension__ ({ \
  48776. int64x2x2_t __ret; \
  48777. __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
  48778. \
  48779. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  48780. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  48781. __ret; \
  48782. })
  48783. #endif
  48784. #ifdef __LITTLE_ENDIAN__
  48785. #define vld1q_s16_x2(__p0) __extension__ ({ \
  48786. int16x8x2_t __ret; \
  48787. __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
  48788. __ret; \
  48789. })
  48790. #else
  48791. #define vld1q_s16_x2(__p0) __extension__ ({ \
  48792. int16x8x2_t __ret; \
  48793. __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
  48794. \
  48795. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  48796. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  48797. __ret; \
  48798. })
  48799. #endif
  48800. #ifdef __LITTLE_ENDIAN__
  48801. #define vld1_u8_x2(__p0) __extension__ ({ \
  48802. uint8x8x2_t __ret; \
  48803. __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
  48804. __ret; \
  48805. })
  48806. #else
  48807. #define vld1_u8_x2(__p0) __extension__ ({ \
  48808. uint8x8x2_t __ret; \
  48809. __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
  48810. \
  48811. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  48812. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  48813. __ret; \
  48814. })
  48815. #endif
  48816. #ifdef __LITTLE_ENDIAN__
  48817. #define vld1_u32_x2(__p0) __extension__ ({ \
  48818. uint32x2x2_t __ret; \
  48819. __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
  48820. __ret; \
  48821. })
  48822. #else
  48823. #define vld1_u32_x2(__p0) __extension__ ({ \
  48824. uint32x2x2_t __ret; \
  48825. __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
  48826. \
  48827. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  48828. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  48829. __ret; \
  48830. })
  48831. #endif
  48832. #ifdef __LITTLE_ENDIAN__
  48833. #define vld1_u64_x2(__p0) __extension__ ({ \
  48834. uint64x1x2_t __ret; \
  48835. __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
  48836. __ret; \
  48837. })
  48838. #else
  48839. #define vld1_u64_x2(__p0) __extension__ ({ \
  48840. uint64x1x2_t __ret; \
  48841. __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
  48842. __ret; \
  48843. })
  48844. #endif
  48845. #ifdef __LITTLE_ENDIAN__
  48846. #define vld1_u16_x2(__p0) __extension__ ({ \
  48847. uint16x4x2_t __ret; \
  48848. __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
  48849. __ret; \
  48850. })
  48851. #else
  48852. #define vld1_u16_x2(__p0) __extension__ ({ \
  48853. uint16x4x2_t __ret; \
  48854. __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
  48855. \
  48856. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  48857. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  48858. __ret; \
  48859. })
  48860. #endif
  48861. #ifdef __LITTLE_ENDIAN__
  48862. #define vld1_s8_x2(__p0) __extension__ ({ \
  48863. int8x8x2_t __ret; \
  48864. __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
  48865. __ret; \
  48866. })
  48867. #else
  48868. #define vld1_s8_x2(__p0) __extension__ ({ \
  48869. int8x8x2_t __ret; \
  48870. __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
  48871. \
  48872. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  48873. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  48874. __ret; \
  48875. })
  48876. #endif
  48877. #ifdef __LITTLE_ENDIAN__
  48878. #define vld1_f64_x2(__p0) __extension__ ({ \
  48879. float64x1x2_t __ret; \
  48880. __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
  48881. __ret; \
  48882. })
  48883. #else
  48884. #define vld1_f64_x2(__p0) __extension__ ({ \
  48885. float64x1x2_t __ret; \
  48886. __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
  48887. __ret; \
  48888. })
  48889. #endif
  48890. #ifdef __LITTLE_ENDIAN__
  48891. #define vld1_f32_x2(__p0) __extension__ ({ \
  48892. float32x2x2_t __ret; \
  48893. __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
  48894. __ret; \
  48895. })
  48896. #else
  48897. #define vld1_f32_x2(__p0) __extension__ ({ \
  48898. float32x2x2_t __ret; \
  48899. __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
  48900. \
  48901. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  48902. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  48903. __ret; \
  48904. })
  48905. #endif
  48906. #ifdef __LITTLE_ENDIAN__
  48907. #define vld1_f16_x2(__p0) __extension__ ({ \
  48908. float16x4x2_t __ret; \
  48909. __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
  48910. __ret; \
  48911. })
  48912. #else
  48913. #define vld1_f16_x2(__p0) __extension__ ({ \
  48914. float16x4x2_t __ret; \
  48915. __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
  48916. \
  48917. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  48918. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  48919. __ret; \
  48920. })
  48921. #endif
  48922. #ifdef __LITTLE_ENDIAN__
  48923. #define vld1_s32_x2(__p0) __extension__ ({ \
  48924. int32x2x2_t __ret; \
  48925. __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
  48926. __ret; \
  48927. })
  48928. #else
  48929. #define vld1_s32_x2(__p0) __extension__ ({ \
  48930. int32x2x2_t __ret; \
  48931. __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
  48932. \
  48933. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  48934. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  48935. __ret; \
  48936. })
  48937. #endif
  48938. #ifdef __LITTLE_ENDIAN__
  48939. #define vld1_s64_x2(__p0) __extension__ ({ \
  48940. int64x1x2_t __ret; \
  48941. __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
  48942. __ret; \
  48943. })
  48944. #else
  48945. #define vld1_s64_x2(__p0) __extension__ ({ \
  48946. int64x1x2_t __ret; \
  48947. __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
  48948. __ret; \
  48949. })
  48950. #endif
  48951. #ifdef __LITTLE_ENDIAN__
  48952. #define vld1_s16_x2(__p0) __extension__ ({ \
  48953. int16x4x2_t __ret; \
  48954. __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
  48955. __ret; \
  48956. })
  48957. #else
  48958. #define vld1_s16_x2(__p0) __extension__ ({ \
  48959. int16x4x2_t __ret; \
  48960. __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
  48961. \
  48962. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  48963. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  48964. __ret; \
  48965. })
  48966. #endif
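/* Illustrative sketch (not part of the generated header; the array name is
 * hypothetical). The "_x2" forms load two back-to-back vectors from contiguous
 * memory into a ..x2_t pair, without the de-interleaving performed by vld2:
 *
 *   float buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
 *   float32x4x2_t pair = vld1q_f32_x2(buf);
 *   // pair.val[0] = {0,1,2,3}, pair.val[1] = {4,5,6,7}
 */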
  48967. #ifdef __LITTLE_ENDIAN__
  48968. #define vld1_p8_x3(__p0) __extension__ ({ \
  48969. poly8x8x3_t __ret; \
  48970. __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
  48971. __ret; \
  48972. })
  48973. #else
  48974. #define vld1_p8_x3(__p0) __extension__ ({ \
  48975. poly8x8x3_t __ret; \
  48976. __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
  48977. \
  48978. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  48979. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  48980. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  48981. __ret; \
  48982. })
  48983. #endif
  48984. #ifdef __LITTLE_ENDIAN__
  48985. #define vld1_p64_x3(__p0) __extension__ ({ \
  48986. poly64x1x3_t __ret; \
  48987. __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
  48988. __ret; \
  48989. })
  48990. #else
  48991. #define vld1_p64_x3(__p0) __extension__ ({ \
  48992. poly64x1x3_t __ret; \
  48993. __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
  48994. __ret; \
  48995. })
  48996. #endif
  48997. #ifdef __LITTLE_ENDIAN__
  48998. #define vld1_p16_x3(__p0) __extension__ ({ \
  48999. poly16x4x3_t __ret; \
  49000. __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
  49001. __ret; \
  49002. })
  49003. #else
  49004. #define vld1_p16_x3(__p0) __extension__ ({ \
  49005. poly16x4x3_t __ret; \
  49006. __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
  49007. \
  49008. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49009. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49010. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49011. __ret; \
  49012. })
  49013. #endif
  49014. #ifdef __LITTLE_ENDIAN__
  49015. #define vld1q_p8_x3(__p0) __extension__ ({ \
  49016. poly8x16x3_t __ret; \
  49017. __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
  49018. __ret; \
  49019. })
  49020. #else
  49021. #define vld1q_p8_x3(__p0) __extension__ ({ \
  49022. poly8x16x3_t __ret; \
  49023. __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
  49024. \
  49025. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49026. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49027. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49028. __ret; \
  49029. })
  49030. #endif
  49031. #ifdef __LITTLE_ENDIAN__
  49032. #define vld1q_p64_x3(__p0) __extension__ ({ \
  49033. poly64x2x3_t __ret; \
  49034. __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
  49035. __ret; \
  49036. })
  49037. #else
  49038. #define vld1q_p64_x3(__p0) __extension__ ({ \
  49039. poly64x2x3_t __ret; \
  49040. __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
  49041. \
  49042. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49043. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49044. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49045. __ret; \
  49046. })
  49047. #endif
  49048. #ifdef __LITTLE_ENDIAN__
  49049. #define vld1q_p16_x3(__p0) __extension__ ({ \
  49050. poly16x8x3_t __ret; \
  49051. __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
  49052. __ret; \
  49053. })
  49054. #else
  49055. #define vld1q_p16_x3(__p0) __extension__ ({ \
  49056. poly16x8x3_t __ret; \
  49057. __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
  49058. \
  49059. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49060. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49061. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49062. __ret; \
  49063. })
  49064. #endif
  49065. #ifdef __LITTLE_ENDIAN__
  49066. #define vld1q_u8_x3(__p0) __extension__ ({ \
  49067. uint8x16x3_t __ret; \
  49068. __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
  49069. __ret; \
  49070. })
  49071. #else
  49072. #define vld1q_u8_x3(__p0) __extension__ ({ \
  49073. uint8x16x3_t __ret; \
  49074. __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
  49075. \
  49076. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49077. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49078. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49079. __ret; \
  49080. })
  49081. #endif
  49082. #ifdef __LITTLE_ENDIAN__
  49083. #define vld1q_u32_x3(__p0) __extension__ ({ \
  49084. uint32x4x3_t __ret; \
  49085. __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
  49086. __ret; \
  49087. })
  49088. #else
  49089. #define vld1q_u32_x3(__p0) __extension__ ({ \
  49090. uint32x4x3_t __ret; \
  49091. __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
  49092. \
  49093. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49094. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49095. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49096. __ret; \
  49097. })
  49098. #endif
  49099. #ifdef __LITTLE_ENDIAN__
  49100. #define vld1q_u64_x3(__p0) __extension__ ({ \
  49101. uint64x2x3_t __ret; \
  49102. __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
  49103. __ret; \
  49104. })
  49105. #else
  49106. #define vld1q_u64_x3(__p0) __extension__ ({ \
  49107. uint64x2x3_t __ret; \
  49108. __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
  49109. \
  49110. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49111. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49112. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49113. __ret; \
  49114. })
  49115. #endif
  49116. #ifdef __LITTLE_ENDIAN__
  49117. #define vld1q_u16_x3(__p0) __extension__ ({ \
  49118. uint16x8x3_t __ret; \
  49119. __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
  49120. __ret; \
  49121. })
  49122. #else
  49123. #define vld1q_u16_x3(__p0) __extension__ ({ \
  49124. uint16x8x3_t __ret; \
  49125. __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
  49126. \
  49127. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49128. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49129. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49130. __ret; \
  49131. })
  49132. #endif
  49133. #ifdef __LITTLE_ENDIAN__
  49134. #define vld1q_s8_x3(__p0) __extension__ ({ \
  49135. int8x16x3_t __ret; \
  49136. __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
  49137. __ret; \
  49138. })
  49139. #else
  49140. #define vld1q_s8_x3(__p0) __extension__ ({ \
  49141. int8x16x3_t __ret; \
  49142. __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
  49143. \
  49144. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49145. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49146. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49147. __ret; \
  49148. })
  49149. #endif
  49150. #ifdef __LITTLE_ENDIAN__
  49151. #define vld1q_f64_x3(__p0) __extension__ ({ \
  49152. float64x2x3_t __ret; \
  49153. __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
  49154. __ret; \
  49155. })
  49156. #else
  49157. #define vld1q_f64_x3(__p0) __extension__ ({ \
  49158. float64x2x3_t __ret; \
  49159. __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
  49160. \
  49161. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49162. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49163. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49164. __ret; \
  49165. })
  49166. #endif
  49167. #ifdef __LITTLE_ENDIAN__
  49168. #define vld1q_f32_x3(__p0) __extension__ ({ \
  49169. float32x4x3_t __ret; \
  49170. __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
  49171. __ret; \
  49172. })
  49173. #else
  49174. #define vld1q_f32_x3(__p0) __extension__ ({ \
  49175. float32x4x3_t __ret; \
  49176. __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
  49177. \
  49178. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49179. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49180. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49181. __ret; \
  49182. })
  49183. #endif
  49184. #ifdef __LITTLE_ENDIAN__
  49185. #define vld1q_f16_x3(__p0) __extension__ ({ \
  49186. float16x8x3_t __ret; \
  49187. __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
  49188. __ret; \
  49189. })
  49190. #else
  49191. #define vld1q_f16_x3(__p0) __extension__ ({ \
  49192. float16x8x3_t __ret; \
  49193. __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
  49194. \
  49195. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49196. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49197. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49198. __ret; \
  49199. })
  49200. #endif
  49201. #ifdef __LITTLE_ENDIAN__
  49202. #define vld1q_s32_x3(__p0) __extension__ ({ \
  49203. int32x4x3_t __ret; \
  49204. __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
  49205. __ret; \
  49206. })
  49207. #else
  49208. #define vld1q_s32_x3(__p0) __extension__ ({ \
  49209. int32x4x3_t __ret; \
  49210. __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
  49211. \
  49212. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49213. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49214. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49215. __ret; \
  49216. })
  49217. #endif
  49218. #ifdef __LITTLE_ENDIAN__
  49219. #define vld1q_s64_x3(__p0) __extension__ ({ \
  49220. int64x2x3_t __ret; \
  49221. __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
  49222. __ret; \
  49223. })
  49224. #else
  49225. #define vld1q_s64_x3(__p0) __extension__ ({ \
  49226. int64x2x3_t __ret; \
  49227. __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
  49228. \
  49229. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49230. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49231. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49232. __ret; \
  49233. })
  49234. #endif
  49235. #ifdef __LITTLE_ENDIAN__
  49236. #define vld1q_s16_x3(__p0) __extension__ ({ \
  49237. int16x8x3_t __ret; \
  49238. __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
  49239. __ret; \
  49240. })
  49241. #else
  49242. #define vld1q_s16_x3(__p0) __extension__ ({ \
  49243. int16x8x3_t __ret; \
  49244. __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
  49245. \
  49246. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49247. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49248. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49249. __ret; \
  49250. })
  49251. #endif
  49252. #ifdef __LITTLE_ENDIAN__
  49253. #define vld1_u8_x3(__p0) __extension__ ({ \
  49254. uint8x8x3_t __ret; \
  49255. __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
  49256. __ret; \
  49257. })
  49258. #else
  49259. #define vld1_u8_x3(__p0) __extension__ ({ \
  49260. uint8x8x3_t __ret; \
  49261. __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
  49262. \
  49263. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49264. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49265. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49266. __ret; \
  49267. })
  49268. #endif
  49269. #ifdef __LITTLE_ENDIAN__
  49270. #define vld1_u32_x3(__p0) __extension__ ({ \
  49271. uint32x2x3_t __ret; \
  49272. __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
  49273. __ret; \
  49274. })
  49275. #else
  49276. #define vld1_u32_x3(__p0) __extension__ ({ \
  49277. uint32x2x3_t __ret; \
  49278. __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
  49279. \
  49280. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49281. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49282. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49283. __ret; \
  49284. })
  49285. #endif
  49286. #ifdef __LITTLE_ENDIAN__
  49287. #define vld1_u64_x3(__p0) __extension__ ({ \
  49288. uint64x1x3_t __ret; \
  49289. __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
  49290. __ret; \
  49291. })
  49292. #else
  49293. #define vld1_u64_x3(__p0) __extension__ ({ \
  49294. uint64x1x3_t __ret; \
  49295. __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
  49296. __ret; \
  49297. })
  49298. #endif
  49299. #ifdef __LITTLE_ENDIAN__
  49300. #define vld1_u16_x3(__p0) __extension__ ({ \
  49301. uint16x4x3_t __ret; \
  49302. __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
  49303. __ret; \
  49304. })
  49305. #else
  49306. #define vld1_u16_x3(__p0) __extension__ ({ \
  49307. uint16x4x3_t __ret; \
  49308. __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
  49309. \
  49310. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49311. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49312. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49313. __ret; \
  49314. })
  49315. #endif
  49316. #ifdef __LITTLE_ENDIAN__
  49317. #define vld1_s8_x3(__p0) __extension__ ({ \
  49318. int8x8x3_t __ret; \
  49319. __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
  49320. __ret; \
  49321. })
  49322. #else
  49323. #define vld1_s8_x3(__p0) __extension__ ({ \
  49324. int8x8x3_t __ret; \
  49325. __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
  49326. \
  49327. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49328. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49329. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49330. __ret; \
  49331. })
  49332. #endif
  49333. #ifdef __LITTLE_ENDIAN__
  49334. #define vld1_f64_x3(__p0) __extension__ ({ \
  49335. float64x1x3_t __ret; \
  49336. __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
  49337. __ret; \
  49338. })
  49339. #else
  49340. #define vld1_f64_x3(__p0) __extension__ ({ \
  49341. float64x1x3_t __ret; \
  49342. __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
  49343. __ret; \
  49344. })
  49345. #endif
  49346. #ifdef __LITTLE_ENDIAN__
  49347. #define vld1_f32_x3(__p0) __extension__ ({ \
  49348. float32x2x3_t __ret; \
  49349. __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
  49350. __ret; \
  49351. })
  49352. #else
  49353. #define vld1_f32_x3(__p0) __extension__ ({ \
  49354. float32x2x3_t __ret; \
  49355. __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
  49356. \
  49357. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49358. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49359. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49360. __ret; \
  49361. })
  49362. #endif
  49363. #ifdef __LITTLE_ENDIAN__
  49364. #define vld1_f16_x3(__p0) __extension__ ({ \
  49365. float16x4x3_t __ret; \
  49366. __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
  49367. __ret; \
  49368. })
  49369. #else
  49370. #define vld1_f16_x3(__p0) __extension__ ({ \
  49371. float16x4x3_t __ret; \
  49372. __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
  49373. \
  49374. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49375. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49376. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49377. __ret; \
  49378. })
  49379. #endif
  49380. #ifdef __LITTLE_ENDIAN__
  49381. #define vld1_s32_x3(__p0) __extension__ ({ \
  49382. int32x2x3_t __ret; \
  49383. __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
  49384. __ret; \
  49385. })
  49386. #else
  49387. #define vld1_s32_x3(__p0) __extension__ ({ \
  49388. int32x2x3_t __ret; \
  49389. __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
  49390. \
  49391. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49392. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49393. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49394. __ret; \
  49395. })
  49396. #endif
  49397. #ifdef __LITTLE_ENDIAN__
  49398. #define vld1_s64_x3(__p0) __extension__ ({ \
  49399. int64x1x3_t __ret; \
  49400. __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
  49401. __ret; \
  49402. })
  49403. #else
  49404. #define vld1_s64_x3(__p0) __extension__ ({ \
  49405. int64x1x3_t __ret; \
  49406. __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
  49407. __ret; \
  49408. })
  49409. #endif
  49410. #ifdef __LITTLE_ENDIAN__
  49411. #define vld1_s16_x3(__p0) __extension__ ({ \
  49412. int16x4x3_t __ret; \
  49413. __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
  49414. __ret; \
  49415. })
  49416. #else
  49417. #define vld1_s16_x3(__p0) __extension__ ({ \
  49418. int16x4x3_t __ret; \
  49419. __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
  49420. \
  49421. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49422. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49423. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49424. __ret; \
  49425. })
  49426. #endif
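/*
 * vld1_*_x4 / vld1q_*_x4: load four consecutive vectors of the named element
 * type from __p0 into a <type>x4_t tuple via __builtin_neon_vld1_x4_v /
 * __builtin_neon_vld1q_x4_v.  On big-endian targets each returned vector is
 * lane-reversed with __builtin_shufflevector so the tuple matches the lane
 * order the rest of the header assumes; for the single-lane 64-bit d-register
 * types there is nothing to reverse, so both branches are identical.
 */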
  49427. #ifdef __LITTLE_ENDIAN__
  49428. #define vld1_p8_x4(__p0) __extension__ ({ \
  49429. poly8x8x4_t __ret; \
  49430. __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
  49431. __ret; \
  49432. })
  49433. #else
  49434. #define vld1_p8_x4(__p0) __extension__ ({ \
  49435. poly8x8x4_t __ret; \
  49436. __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
  49437. \
  49438. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49439. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49440. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49441. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  49442. __ret; \
  49443. })
  49444. #endif
  49445. #ifdef __LITTLE_ENDIAN__
  49446. #define vld1_p64_x4(__p0) __extension__ ({ \
  49447. poly64x1x4_t __ret; \
  49448. __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
  49449. __ret; \
  49450. })
  49451. #else
  49452. #define vld1_p64_x4(__p0) __extension__ ({ \
  49453. poly64x1x4_t __ret; \
  49454. __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
  49455. __ret; \
  49456. })
  49457. #endif
  49458. #ifdef __LITTLE_ENDIAN__
  49459. #define vld1_p16_x4(__p0) __extension__ ({ \
  49460. poly16x4x4_t __ret; \
  49461. __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
  49462. __ret; \
  49463. })
  49464. #else
  49465. #define vld1_p16_x4(__p0) __extension__ ({ \
  49466. poly16x4x4_t __ret; \
  49467. __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
  49468. \
  49469. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49470. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49471. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49472. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  49473. __ret; \
  49474. })
  49475. #endif
  49476. #ifdef __LITTLE_ENDIAN__
  49477. #define vld1q_p8_x4(__p0) __extension__ ({ \
  49478. poly8x16x4_t __ret; \
  49479. __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
  49480. __ret; \
  49481. })
  49482. #else
  49483. #define vld1q_p8_x4(__p0) __extension__ ({ \
  49484. poly8x16x4_t __ret; \
  49485. __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
  49486. \
  49487. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49488. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49489. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49490. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49491. __ret; \
  49492. })
  49493. #endif
  49494. #ifdef __LITTLE_ENDIAN__
  49495. #define vld1q_p64_x4(__p0) __extension__ ({ \
  49496. poly64x2x4_t __ret; \
  49497. __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
  49498. __ret; \
  49499. })
  49500. #else
  49501. #define vld1q_p64_x4(__p0) __extension__ ({ \
  49502. poly64x2x4_t __ret; \
  49503. __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
  49504. \
  49505. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49506. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49507. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49508. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  49509. __ret; \
  49510. })
  49511. #endif
  49512. #ifdef __LITTLE_ENDIAN__
  49513. #define vld1q_p16_x4(__p0) __extension__ ({ \
  49514. poly16x8x4_t __ret; \
  49515. __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
  49516. __ret; \
  49517. })
  49518. #else
  49519. #define vld1q_p16_x4(__p0) __extension__ ({ \
  49520. poly16x8x4_t __ret; \
  49521. __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
  49522. \
  49523. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49524. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49525. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49526. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  49527. __ret; \
  49528. })
  49529. #endif
  49530. #ifdef __LITTLE_ENDIAN__
  49531. #define vld1q_u8_x4(__p0) __extension__ ({ \
  49532. uint8x16x4_t __ret; \
  49533. __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
  49534. __ret; \
  49535. })
  49536. #else
  49537. #define vld1q_u8_x4(__p0) __extension__ ({ \
  49538. uint8x16x4_t __ret; \
  49539. __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
  49540. \
  49541. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49542. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49543. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49544. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49545. __ret; \
  49546. })
  49547. #endif
  49548. #ifdef __LITTLE_ENDIAN__
  49549. #define vld1q_u32_x4(__p0) __extension__ ({ \
  49550. uint32x4x4_t __ret; \
  49551. __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
  49552. __ret; \
  49553. })
  49554. #else
  49555. #define vld1q_u32_x4(__p0) __extension__ ({ \
  49556. uint32x4x4_t __ret; \
  49557. __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
  49558. \
  49559. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49560. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49561. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49562. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  49563. __ret; \
  49564. })
  49565. #endif
  49566. #ifdef __LITTLE_ENDIAN__
  49567. #define vld1q_u64_x4(__p0) __extension__ ({ \
  49568. uint64x2x4_t __ret; \
  49569. __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
  49570. __ret; \
  49571. })
  49572. #else
  49573. #define vld1q_u64_x4(__p0) __extension__ ({ \
  49574. uint64x2x4_t __ret; \
  49575. __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
  49576. \
  49577. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49578. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49579. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49580. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  49581. __ret; \
  49582. })
  49583. #endif
  49584. #ifdef __LITTLE_ENDIAN__
  49585. #define vld1q_u16_x4(__p0) __extension__ ({ \
  49586. uint16x8x4_t __ret; \
  49587. __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
  49588. __ret; \
  49589. })
  49590. #else
  49591. #define vld1q_u16_x4(__p0) __extension__ ({ \
  49592. uint16x8x4_t __ret; \
  49593. __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
  49594. \
  49595. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49596. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49597. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49598. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  49599. __ret; \
  49600. })
  49601. #endif
  49602. #ifdef __LITTLE_ENDIAN__
  49603. #define vld1q_s8_x4(__p0) __extension__ ({ \
  49604. int8x16x4_t __ret; \
  49605. __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
  49606. __ret; \
  49607. })
  49608. #else
  49609. #define vld1q_s8_x4(__p0) __extension__ ({ \
  49610. int8x16x4_t __ret; \
  49611. __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
  49612. \
  49613. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49614. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49615. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49616. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  49617. __ret; \
  49618. })
  49619. #endif
  49620. #ifdef __LITTLE_ENDIAN__
  49621. #define vld1q_f64_x4(__p0) __extension__ ({ \
  49622. float64x2x4_t __ret; \
  49623. __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
  49624. __ret; \
  49625. })
  49626. #else
  49627. #define vld1q_f64_x4(__p0) __extension__ ({ \
  49628. float64x2x4_t __ret; \
  49629. __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
  49630. \
  49631. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49632. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49633. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49634. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  49635. __ret; \
  49636. })
  49637. #endif
  49638. #ifdef __LITTLE_ENDIAN__
  49639. #define vld1q_f32_x4(__p0) __extension__ ({ \
  49640. float32x4x4_t __ret; \
  49641. __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
  49642. __ret; \
  49643. })
  49644. #else
  49645. #define vld1q_f32_x4(__p0) __extension__ ({ \
  49646. float32x4x4_t __ret; \
  49647. __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
  49648. \
  49649. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49650. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49651. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49652. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  49653. __ret; \
  49654. })
  49655. #endif
  49656. #ifdef __LITTLE_ENDIAN__
  49657. #define vld1q_f16_x4(__p0) __extension__ ({ \
  49658. float16x8x4_t __ret; \
  49659. __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
  49660. __ret; \
  49661. })
  49662. #else
  49663. #define vld1q_f16_x4(__p0) __extension__ ({ \
  49664. float16x8x4_t __ret; \
  49665. __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
  49666. \
  49667. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49668. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49669. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49670. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  49671. __ret; \
  49672. })
  49673. #endif
  49674. #ifdef __LITTLE_ENDIAN__
  49675. #define vld1q_s32_x4(__p0) __extension__ ({ \
  49676. int32x4x4_t __ret; \
  49677. __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
  49678. __ret; \
  49679. })
  49680. #else
  49681. #define vld1q_s32_x4(__p0) __extension__ ({ \
  49682. int32x4x4_t __ret; \
  49683. __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
  49684. \
  49685. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49686. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49687. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49688. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  49689. __ret; \
  49690. })
  49691. #endif
  49692. #ifdef __LITTLE_ENDIAN__
  49693. #define vld1q_s64_x4(__p0) __extension__ ({ \
  49694. int64x2x4_t __ret; \
  49695. __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
  49696. __ret; \
  49697. })
  49698. #else
  49699. #define vld1q_s64_x4(__p0) __extension__ ({ \
  49700. int64x2x4_t __ret; \
  49701. __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
  49702. \
  49703. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49704. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49705. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49706. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  49707. __ret; \
  49708. })
  49709. #endif
  49710. #ifdef __LITTLE_ENDIAN__
  49711. #define vld1q_s16_x4(__p0) __extension__ ({ \
  49712. int16x8x4_t __ret; \
  49713. __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
  49714. __ret; \
  49715. })
  49716. #else
  49717. #define vld1q_s16_x4(__p0) __extension__ ({ \
  49718. int16x8x4_t __ret; \
  49719. __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
  49720. \
  49721. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49722. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49723. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49724. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  49725. __ret; \
  49726. })
  49727. #endif
  49728. #ifdef __LITTLE_ENDIAN__
  49729. #define vld1_u8_x4(__p0) __extension__ ({ \
  49730. uint8x8x4_t __ret; \
  49731. __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
  49732. __ret; \
  49733. })
  49734. #else
  49735. #define vld1_u8_x4(__p0) __extension__ ({ \
  49736. uint8x8x4_t __ret; \
  49737. __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
  49738. \
  49739. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49740. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49741. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49742. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  49743. __ret; \
  49744. })
  49745. #endif
  49746. #ifdef __LITTLE_ENDIAN__
  49747. #define vld1_u32_x4(__p0) __extension__ ({ \
  49748. uint32x2x4_t __ret; \
  49749. __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
  49750. __ret; \
  49751. })
  49752. #else
  49753. #define vld1_u32_x4(__p0) __extension__ ({ \
  49754. uint32x2x4_t __ret; \
  49755. __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
  49756. \
  49757. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49758. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49759. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49760. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  49761. __ret; \
  49762. })
  49763. #endif
  49764. #ifdef __LITTLE_ENDIAN__
  49765. #define vld1_u64_x4(__p0) __extension__ ({ \
  49766. uint64x1x4_t __ret; \
  49767. __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
  49768. __ret; \
  49769. })
  49770. #else
  49771. #define vld1_u64_x4(__p0) __extension__ ({ \
  49772. uint64x1x4_t __ret; \
  49773. __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
  49774. __ret; \
  49775. })
  49776. #endif
  49777. #ifdef __LITTLE_ENDIAN__
  49778. #define vld1_u16_x4(__p0) __extension__ ({ \
  49779. uint16x4x4_t __ret; \
  49780. __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
  49781. __ret; \
  49782. })
  49783. #else
  49784. #define vld1_u16_x4(__p0) __extension__ ({ \
  49785. uint16x4x4_t __ret; \
  49786. __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
  49787. \
  49788. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49789. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49790. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49791. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  49792. __ret; \
  49793. })
  49794. #endif
  49795. #ifdef __LITTLE_ENDIAN__
  49796. #define vld1_s8_x4(__p0) __extension__ ({ \
  49797. int8x8x4_t __ret; \
  49798. __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
  49799. __ret; \
  49800. })
  49801. #else
  49802. #define vld1_s8_x4(__p0) __extension__ ({ \
  49803. int8x8x4_t __ret; \
  49804. __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
  49805. \
  49806. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  49807. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  49808. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  49809. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  49810. __ret; \
  49811. })
  49812. #endif
  49813. #ifdef __LITTLE_ENDIAN__
  49814. #define vld1_f64_x4(__p0) __extension__ ({ \
  49815. float64x1x4_t __ret; \
  49816. __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
  49817. __ret; \
  49818. })
  49819. #else
  49820. #define vld1_f64_x4(__p0) __extension__ ({ \
  49821. float64x1x4_t __ret; \
  49822. __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
  49823. __ret; \
  49824. })
  49825. #endif
  49826. #ifdef __LITTLE_ENDIAN__
  49827. #define vld1_f32_x4(__p0) __extension__ ({ \
  49828. float32x2x4_t __ret; \
  49829. __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
  49830. __ret; \
  49831. })
  49832. #else
  49833. #define vld1_f32_x4(__p0) __extension__ ({ \
  49834. float32x2x4_t __ret; \
  49835. __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
  49836. \
  49837. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49838. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49839. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49840. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  49841. __ret; \
  49842. })
  49843. #endif
  49844. #ifdef __LITTLE_ENDIAN__
  49845. #define vld1_f16_x4(__p0) __extension__ ({ \
  49846. float16x4x4_t __ret; \
  49847. __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
  49848. __ret; \
  49849. })
  49850. #else
  49851. #define vld1_f16_x4(__p0) __extension__ ({ \
  49852. float16x4x4_t __ret; \
  49853. __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
  49854. \
  49855. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49856. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49857. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49858. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  49859. __ret; \
  49860. })
  49861. #endif
  49862. #ifdef __LITTLE_ENDIAN__
  49863. #define vld1_s32_x4(__p0) __extension__ ({ \
  49864. int32x2x4_t __ret; \
  49865. __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
  49866. __ret; \
  49867. })
  49868. #else
  49869. #define vld1_s32_x4(__p0) __extension__ ({ \
  49870. int32x2x4_t __ret; \
  49871. __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
  49872. \
  49873. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49874. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49875. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  49876. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  49877. __ret; \
  49878. })
  49879. #endif
  49880. #ifdef __LITTLE_ENDIAN__
  49881. #define vld1_s64_x4(__p0) __extension__ ({ \
  49882. int64x1x4_t __ret; \
  49883. __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
  49884. __ret; \
  49885. })
  49886. #else
  49887. #define vld1_s64_x4(__p0) __extension__ ({ \
  49888. int64x1x4_t __ret; \
  49889. __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
  49890. __ret; \
  49891. })
  49892. #endif
  49893. #ifdef __LITTLE_ENDIAN__
  49894. #define vld1_s16_x4(__p0) __extension__ ({ \
  49895. int16x4x4_t __ret; \
  49896. __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
  49897. __ret; \
  49898. })
  49899. #else
  49900. #define vld1_s16_x4(__p0) __extension__ ({ \
  49901. int16x4x4_t __ret; \
  49902. __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
  49903. \
  49904. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  49905. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  49906. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  49907. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  49908. __ret; \
  49909. })
  49910. #endif
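/*
 * vld2_* / vld2q_*: de-interleaving loads of 2-element structures.  Each macro
 * loads pairs from __p0 and splits them across the two vectors of the returned
 * <type>x2_t; the variants defined here cover the poly64, float64 and
 * 64-bit-element q-register cases, with the usual big-endian lane-reversal
 * fix-up on the results.  Minimal usage sketch (buffer name is illustrative):
 *
 *   uint64_t buf[4] = {0, 1, 2, 3};
 *   uint64x2x2_t v = vld2q_u64(buf);   // v.val[0] = {0, 2}, v.val[1] = {1, 3}
 */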
  49911. #ifdef __LITTLE_ENDIAN__
  49912. #define vld2_p64(__p0) __extension__ ({ \
  49913. poly64x1x2_t __ret; \
  49914. __builtin_neon_vld2_v(&__ret, __p0, 6); \
  49915. __ret; \
  49916. })
  49917. #else
  49918. #define vld2_p64(__p0) __extension__ ({ \
  49919. poly64x1x2_t __ret; \
  49920. __builtin_neon_vld2_v(&__ret, __p0, 6); \
  49921. __ret; \
  49922. })
  49923. #endif
  49924. #ifdef __LITTLE_ENDIAN__
  49925. #define vld2q_p64(__p0) __extension__ ({ \
  49926. poly64x2x2_t __ret; \
  49927. __builtin_neon_vld2q_v(&__ret, __p0, 38); \
  49928. __ret; \
  49929. })
  49930. #else
  49931. #define vld2q_p64(__p0) __extension__ ({ \
  49932. poly64x2x2_t __ret; \
  49933. __builtin_neon_vld2q_v(&__ret, __p0, 38); \
  49934. \
  49935. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49936. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49937. __ret; \
  49938. })
  49939. #endif
  49940. #ifdef __LITTLE_ENDIAN__
  49941. #define vld2q_u64(__p0) __extension__ ({ \
  49942. uint64x2x2_t __ret; \
  49943. __builtin_neon_vld2q_v(&__ret, __p0, 51); \
  49944. __ret; \
  49945. })
  49946. #else
  49947. #define vld2q_u64(__p0) __extension__ ({ \
  49948. uint64x2x2_t __ret; \
  49949. __builtin_neon_vld2q_v(&__ret, __p0, 51); \
  49950. \
  49951. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49952. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49953. __ret; \
  49954. })
  49955. #endif
  49956. #ifdef __LITTLE_ENDIAN__
  49957. #define vld2q_f64(__p0) __extension__ ({ \
  49958. float64x2x2_t __ret; \
  49959. __builtin_neon_vld2q_v(&__ret, __p0, 42); \
  49960. __ret; \
  49961. })
  49962. #else
  49963. #define vld2q_f64(__p0) __extension__ ({ \
  49964. float64x2x2_t __ret; \
  49965. __builtin_neon_vld2q_v(&__ret, __p0, 42); \
  49966. \
  49967. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49968. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49969. __ret; \
  49970. })
  49971. #endif
  49972. #ifdef __LITTLE_ENDIAN__
  49973. #define vld2q_s64(__p0) __extension__ ({ \
  49974. int64x2x2_t __ret; \
  49975. __builtin_neon_vld2q_v(&__ret, __p0, 35); \
  49976. __ret; \
  49977. })
  49978. #else
  49979. #define vld2q_s64(__p0) __extension__ ({ \
  49980. int64x2x2_t __ret; \
  49981. __builtin_neon_vld2q_v(&__ret, __p0, 35); \
  49982. \
  49983. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  49984. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  49985. __ret; \
  49986. })
  49987. #endif
  49988. #ifdef __LITTLE_ENDIAN__
  49989. #define vld2_f64(__p0) __extension__ ({ \
  49990. float64x1x2_t __ret; \
  49991. __builtin_neon_vld2_v(&__ret, __p0, 10); \
  49992. __ret; \
  49993. })
  49994. #else
  49995. #define vld2_f64(__p0) __extension__ ({ \
  49996. float64x1x2_t __ret; \
  49997. __builtin_neon_vld2_v(&__ret, __p0, 10); \
  49998. __ret; \
  49999. })
  50000. #endif
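/*
 * vld2_dup_* / vld2q_dup_*: load a single 2-element structure from __p0 and
 * replicate element 0 across every lane of val[0] and element 1 across every
 * lane of val[1] (a load-and-broadcast form of the de-interleaving load).
 */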
  50001. #ifdef __LITTLE_ENDIAN__
  50002. #define vld2_dup_p64(__p0) __extension__ ({ \
  50003. poly64x1x2_t __ret; \
  50004. __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
  50005. __ret; \
  50006. })
  50007. #else
  50008. #define vld2_dup_p64(__p0) __extension__ ({ \
  50009. poly64x1x2_t __ret; \
  50010. __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
  50011. __ret; \
  50012. })
  50013. #endif
  50014. #ifdef __LITTLE_ENDIAN__
  50015. #define vld2q_dup_p8(__p0) __extension__ ({ \
  50016. poly8x16x2_t __ret; \
  50017. __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
  50018. __ret; \
  50019. })
  50020. #else
  50021. #define vld2q_dup_p8(__p0) __extension__ ({ \
  50022. poly8x16x2_t __ret; \
  50023. __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
  50024. \
  50025. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50026. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50027. __ret; \
  50028. })
  50029. #endif
  50030. #ifdef __LITTLE_ENDIAN__
  50031. #define vld2q_dup_p64(__p0) __extension__ ({ \
  50032. poly64x2x2_t __ret; \
  50033. __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
  50034. __ret; \
  50035. })
  50036. #else
  50037. #define vld2q_dup_p64(__p0) __extension__ ({ \
  50038. poly64x2x2_t __ret; \
  50039. __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
  50040. \
  50041. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50042. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50043. __ret; \
  50044. })
  50045. #endif
  50046. #ifdef __LITTLE_ENDIAN__
  50047. #define vld2q_dup_p16(__p0) __extension__ ({ \
  50048. poly16x8x2_t __ret; \
  50049. __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
  50050. __ret; \
  50051. })
  50052. #else
  50053. #define vld2q_dup_p16(__p0) __extension__ ({ \
  50054. poly16x8x2_t __ret; \
  50055. __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
  50056. \
  50057. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  50058. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  50059. __ret; \
  50060. })
  50061. #endif
  50062. #ifdef __LITTLE_ENDIAN__
  50063. #define vld2q_dup_u8(__p0) __extension__ ({ \
  50064. uint8x16x2_t __ret; \
  50065. __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
  50066. __ret; \
  50067. })
  50068. #else
  50069. #define vld2q_dup_u8(__p0) __extension__ ({ \
  50070. uint8x16x2_t __ret; \
  50071. __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
  50072. \
  50073. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50074. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50075. __ret; \
  50076. })
  50077. #endif
  50078. #ifdef __LITTLE_ENDIAN__
  50079. #define vld2q_dup_u32(__p0) __extension__ ({ \
  50080. uint32x4x2_t __ret; \
  50081. __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
  50082. __ret; \
  50083. })
  50084. #else
  50085. #define vld2q_dup_u32(__p0) __extension__ ({ \
  50086. uint32x4x2_t __ret; \
  50087. __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
  50088. \
  50089. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  50090. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  50091. __ret; \
  50092. })
  50093. #endif
  50094. #ifdef __LITTLE_ENDIAN__
  50095. #define vld2q_dup_u64(__p0) __extension__ ({ \
  50096. uint64x2x2_t __ret; \
  50097. __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
  50098. __ret; \
  50099. })
  50100. #else
  50101. #define vld2q_dup_u64(__p0) __extension__ ({ \
  50102. uint64x2x2_t __ret; \
  50103. __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
  50104. \
  50105. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50106. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50107. __ret; \
  50108. })
  50109. #endif
  50110. #ifdef __LITTLE_ENDIAN__
  50111. #define vld2q_dup_u16(__p0) __extension__ ({ \
  50112. uint16x8x2_t __ret; \
  50113. __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
  50114. __ret; \
  50115. })
  50116. #else
  50117. #define vld2q_dup_u16(__p0) __extension__ ({ \
  50118. uint16x8x2_t __ret; \
  50119. __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
  50120. \
  50121. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  50122. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  50123. __ret; \
  50124. })
  50125. #endif
  50126. #ifdef __LITTLE_ENDIAN__
  50127. #define vld2q_dup_s8(__p0) __extension__ ({ \
  50128. int8x16x2_t __ret; \
  50129. __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
  50130. __ret; \
  50131. })
  50132. #else
  50133. #define vld2q_dup_s8(__p0) __extension__ ({ \
  50134. int8x16x2_t __ret; \
  50135. __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
  50136. \
  50137. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50138. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50139. __ret; \
  50140. })
  50141. #endif
  50142. #ifdef __LITTLE_ENDIAN__
  50143. #define vld2q_dup_f64(__p0) __extension__ ({ \
  50144. float64x2x2_t __ret; \
  50145. __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
  50146. __ret; \
  50147. })
  50148. #else
  50149. #define vld2q_dup_f64(__p0) __extension__ ({ \
  50150. float64x2x2_t __ret; \
  50151. __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
  50152. \
  50153. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50154. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50155. __ret; \
  50156. })
  50157. #endif
  50158. #ifdef __LITTLE_ENDIAN__
  50159. #define vld2q_dup_f32(__p0) __extension__ ({ \
  50160. float32x4x2_t __ret; \
  50161. __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
  50162. __ret; \
  50163. })
  50164. #else
  50165. #define vld2q_dup_f32(__p0) __extension__ ({ \
  50166. float32x4x2_t __ret; \
  50167. __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
  50168. \
  50169. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  50170. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  50171. __ret; \
  50172. })
  50173. #endif
  50174. #ifdef __LITTLE_ENDIAN__
  50175. #define vld2q_dup_f16(__p0) __extension__ ({ \
  50176. float16x8x2_t __ret; \
  50177. __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
  50178. __ret; \
  50179. })
  50180. #else
  50181. #define vld2q_dup_f16(__p0) __extension__ ({ \
  50182. float16x8x2_t __ret; \
  50183. __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
  50184. \
  50185. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  50186. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  50187. __ret; \
  50188. })
  50189. #endif
  50190. #ifdef __LITTLE_ENDIAN__
  50191. #define vld2q_dup_s32(__p0) __extension__ ({ \
  50192. int32x4x2_t __ret; \
  50193. __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
  50194. __ret; \
  50195. })
  50196. #else
  50197. #define vld2q_dup_s32(__p0) __extension__ ({ \
  50198. int32x4x2_t __ret; \
  50199. __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
  50200. \
  50201. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  50202. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  50203. __ret; \
  50204. })
  50205. #endif
  50206. #ifdef __LITTLE_ENDIAN__
  50207. #define vld2q_dup_s64(__p0) __extension__ ({ \
  50208. int64x2x2_t __ret; \
  50209. __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
  50210. __ret; \
  50211. })
  50212. #else
  50213. #define vld2q_dup_s64(__p0) __extension__ ({ \
  50214. int64x2x2_t __ret; \
  50215. __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
  50216. \
  50217. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50218. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50219. __ret; \
  50220. })
  50221. #endif
  50222. #ifdef __LITTLE_ENDIAN__
  50223. #define vld2q_dup_s16(__p0) __extension__ ({ \
  50224. int16x8x2_t __ret; \
  50225. __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
  50226. __ret; \
  50227. })
  50228. #else
  50229. #define vld2q_dup_s16(__p0) __extension__ ({ \
  50230. int16x8x2_t __ret; \
  50231. __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
  50232. \
  50233. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  50234. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  50235. __ret; \
  50236. })
  50237. #endif
  50238. #ifdef __LITTLE_ENDIAN__
  50239. #define vld2_dup_f64(__p0) __extension__ ({ \
  50240. float64x1x2_t __ret; \
  50241. __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
  50242. __ret; \
  50243. })
  50244. #else
  50245. #define vld2_dup_f64(__p0) __extension__ ({ \
  50246. float64x1x2_t __ret; \
  50247. __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
  50248. __ret; \
  50249. })
  50250. #endif
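/*
 * vld2_lane_* / vld2q_lane_*: load one 2-element structure from __p0 into
 * lane __p2 of the tuple passed as __p1, leaving all other lanes unchanged.
 * On big-endian targets the incoming tuple is lane-reversed into __rev1
 * before the builtin call and the result is reversed back, so callers see the
 * same behaviour on either endianness.  Usage sketch (pointer names are
 * illustrative):
 *
 *   uint8x16x2_t acc = vld2q_u8(src);     // src: 32 interleaved bytes
 *   acc = vld2q_lane_u8(pair, acc, 3);    // pair: 2 bytes loaded into lane 3
 */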
  50251. #ifdef __LITTLE_ENDIAN__
  50252. #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  50253. poly64x1x2_t __s1 = __p1; \
  50254. poly64x1x2_t __ret; \
  50255. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
  50256. __ret; \
  50257. })
  50258. #else
  50259. #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  50260. poly64x1x2_t __s1 = __p1; \
  50261. poly64x1x2_t __ret; \
  50262. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
  50263. __ret; \
  50264. })
  50265. #endif
  50266. #ifdef __LITTLE_ENDIAN__
  50267. #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  50268. poly8x16x2_t __s1 = __p1; \
  50269. poly8x16x2_t __ret; \
  50270. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
  50271. __ret; \
  50272. })
  50273. #else
  50274. #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  50275. poly8x16x2_t __s1 = __p1; \
  50276. poly8x16x2_t __rev1; \
  50277. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50278. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50279. poly8x16x2_t __ret; \
  50280. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
  50281. \
  50282. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50283. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50284. __ret; \
  50285. })
  50286. #endif
  50287. #ifdef __LITTLE_ENDIAN__
  50288. #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  50289. poly64x2x2_t __s1 = __p1; \
  50290. poly64x2x2_t __ret; \
  50291. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
  50292. __ret; \
  50293. })
  50294. #else
  50295. #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  50296. poly64x2x2_t __s1 = __p1; \
  50297. poly64x2x2_t __rev1; \
  50298. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  50299. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  50300. poly64x2x2_t __ret; \
  50301. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
  50302. \
  50303. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50304. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50305. __ret; \
  50306. })
  50307. #endif
  50308. #ifdef __LITTLE_ENDIAN__
  50309. #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  50310. uint8x16x2_t __s1 = __p1; \
  50311. uint8x16x2_t __ret; \
  50312. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
  50313. __ret; \
  50314. })
  50315. #else
  50316. #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  50317. uint8x16x2_t __s1 = __p1; \
  50318. uint8x16x2_t __rev1; \
  50319. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50320. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50321. uint8x16x2_t __ret; \
  50322. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
  50323. \
  50324. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50325. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50326. __ret; \
  50327. })
  50328. #endif
  50329. #ifdef __LITTLE_ENDIAN__
  50330. #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  50331. uint64x2x2_t __s1 = __p1; \
  50332. uint64x2x2_t __ret; \
  50333. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
  50334. __ret; \
  50335. })
  50336. #else
  50337. #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  50338. uint64x2x2_t __s1 = __p1; \
  50339. uint64x2x2_t __rev1; \
  50340. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  50341. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  50342. uint64x2x2_t __ret; \
  50343. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
  50344. \
  50345. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50346. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50347. __ret; \
  50348. })
  50349. #endif
  50350. #ifdef __LITTLE_ENDIAN__
  50351. #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  50352. int8x16x2_t __s1 = __p1; \
  50353. int8x16x2_t __ret; \
  50354. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
  50355. __ret; \
  50356. })
  50357. #else
  50358. #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  50359. int8x16x2_t __s1 = __p1; \
  50360. int8x16x2_t __rev1; \
  50361. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50362. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50363. int8x16x2_t __ret; \
  50364. __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
  50365. \
  50366. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50367. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50368. __ret; \
  50369. })
  50370. #endif
  50371. #ifdef __LITTLE_ENDIAN__
  50372. #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  50373. float64x2x2_t __s1 = __p1; \
  50374. float64x2x2_t __ret; \
  50375. __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \
  50376. __ret; \
  50377. })
  50378. #else
  50379. #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  50380. float64x2x2_t __s1 = __p1; \
  50381. float64x2x2_t __rev1; \
  50382. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  50383. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  50384. float64x2x2_t __ret; \
  50385. __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
  50386. \
  50387. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50388. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50389. __ret; \
  50390. })
  50391. #endif
  50392. #ifdef __LITTLE_ENDIAN__
  50393. #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  50394. int64x2x2_t __s1 = __p1; \
  50395. int64x2x2_t __ret; \
  50396. __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \
  50397. __ret; \
  50398. })
  50399. #else
  50400. #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  50401. int64x2x2_t __s1 = __p1; \
  50402. int64x2x2_t __rev1; \
  50403. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  50404. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  50405. int64x2x2_t __ret; \
  50406. __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
  50407. \
  50408. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50409. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50410. __ret; \
  50411. })
  50412. #endif
  50413. #ifdef __LITTLE_ENDIAN__
  50414. #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  50415. uint64x1x2_t __s1 = __p1; \
  50416. uint64x1x2_t __ret; \
  50417. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
  50418. __ret; \
  50419. })
  50420. #else
  50421. #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  50422. uint64x1x2_t __s1 = __p1; \
  50423. uint64x1x2_t __ret; \
  50424. __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
  50425. __ret; \
  50426. })
  50427. #endif
  50428. #ifdef __LITTLE_ENDIAN__
  50429. #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  50430. float64x1x2_t __s1 = __p1; \
  50431. float64x1x2_t __ret; \
  50432. __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
  50433. __ret; \
  50434. })
  50435. #else
  50436. #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  50437. float64x1x2_t __s1 = __p1; \
  50438. float64x1x2_t __ret; \
  50439. __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
  50440. __ret; \
  50441. })
  50442. #endif
  50443. #ifdef __LITTLE_ENDIAN__
  50444. #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  50445. int64x1x2_t __s1 = __p1; \
  50446. int64x1x2_t __ret; \
  50447. __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
  50448. __ret; \
  50449. })
  50450. #else
  50451. #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  50452. int64x1x2_t __s1 = __p1; \
  50453. int64x1x2_t __ret; \
  50454. __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
  50455. __ret; \
  50456. })
  50457. #endif
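/*
 * vld3_* / vld3q_*: de-interleaving loads of 3-element structures into a
 * <type>x3_t tuple.  The poly64, float64 and 64-bit-element q-register
 * variants are defined below, with the same big-endian lane-reversal fix-up
 * as the vld2 forms above.
 */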
  50458. #ifdef __LITTLE_ENDIAN__
  50459. #define vld3_p64(__p0) __extension__ ({ \
  50460. poly64x1x3_t __ret; \
  50461. __builtin_neon_vld3_v(&__ret, __p0, 6); \
  50462. __ret; \
  50463. })
  50464. #else
  50465. #define vld3_p64(__p0) __extension__ ({ \
  50466. poly64x1x3_t __ret; \
  50467. __builtin_neon_vld3_v(&__ret, __p0, 6); \
  50468. __ret; \
  50469. })
  50470. #endif
  50471. #ifdef __LITTLE_ENDIAN__
  50472. #define vld3q_p64(__p0) __extension__ ({ \
  50473. poly64x2x3_t __ret; \
  50474. __builtin_neon_vld3q_v(&__ret, __p0, 38); \
  50475. __ret; \
  50476. })
  50477. #else
  50478. #define vld3q_p64(__p0) __extension__ ({ \
  50479. poly64x2x3_t __ret; \
  50480. __builtin_neon_vld3q_v(&__ret, __p0, 38); \
  50481. \
  50482. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50483. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50484. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50485. __ret; \
  50486. })
  50487. #endif
  50488. #ifdef __LITTLE_ENDIAN__
  50489. #define vld3q_u64(__p0) __extension__ ({ \
  50490. uint64x2x3_t __ret; \
  50491. __builtin_neon_vld3q_v(&__ret, __p0, 51); \
  50492. __ret; \
  50493. })
  50494. #else
  50495. #define vld3q_u64(__p0) __extension__ ({ \
  50496. uint64x2x3_t __ret; \
  50497. __builtin_neon_vld3q_v(&__ret, __p0, 51); \
  50498. \
  50499. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50500. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50501. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50502. __ret; \
  50503. })
  50504. #endif
  50505. #ifdef __LITTLE_ENDIAN__
  50506. #define vld3q_f64(__p0) __extension__ ({ \
  50507. float64x2x3_t __ret; \
  50508. __builtin_neon_vld3q_v(&__ret, __p0, 42); \
  50509. __ret; \
  50510. })
  50511. #else
  50512. #define vld3q_f64(__p0) __extension__ ({ \
  50513. float64x2x3_t __ret; \
  50514. __builtin_neon_vld3q_v(&__ret, __p0, 42); \
  50515. \
  50516. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50517. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50518. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50519. __ret; \
  50520. })
  50521. #endif
  50522. #ifdef __LITTLE_ENDIAN__
  50523. #define vld3q_s64(__p0) __extension__ ({ \
  50524. int64x2x3_t __ret; \
  50525. __builtin_neon_vld3q_v(&__ret, __p0, 35); \
  50526. __ret; \
  50527. })
  50528. #else
  50529. #define vld3q_s64(__p0) __extension__ ({ \
  50530. int64x2x3_t __ret; \
  50531. __builtin_neon_vld3q_v(&__ret, __p0, 35); \
  50532. \
  50533. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50534. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50535. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50536. __ret; \
  50537. })
  50538. #endif
  50539. #ifdef __LITTLE_ENDIAN__
  50540. #define vld3_f64(__p0) __extension__ ({ \
  50541. float64x1x3_t __ret; \
  50542. __builtin_neon_vld3_v(&__ret, __p0, 10); \
  50543. __ret; \
  50544. })
  50545. #else
  50546. #define vld3_f64(__p0) __extension__ ({ \
  50547. float64x1x3_t __ret; \
  50548. __builtin_neon_vld3_v(&__ret, __p0, 10); \
  50549. __ret; \
  50550. })
  50551. #endif
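/*
 * vld3_dup_* / vld3q_dup_*: load a single 3-element structure from __p0 and
 * broadcast its three elements across all lanes of val[0], val[1] and val[2]
 * respectively.
 */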
  50552. #ifdef __LITTLE_ENDIAN__
  50553. #define vld3_dup_p64(__p0) __extension__ ({ \
  50554. poly64x1x3_t __ret; \
  50555. __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
  50556. __ret; \
  50557. })
  50558. #else
  50559. #define vld3_dup_p64(__p0) __extension__ ({ \
  50560. poly64x1x3_t __ret; \
  50561. __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
  50562. __ret; \
  50563. })
  50564. #endif
  50565. #ifdef __LITTLE_ENDIAN__
  50566. #define vld3q_dup_p8(__p0) __extension__ ({ \
  50567. poly8x16x3_t __ret; \
  50568. __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
  50569. __ret; \
  50570. })
  50571. #else
  50572. #define vld3q_dup_p8(__p0) __extension__ ({ \
  50573. poly8x16x3_t __ret; \
  50574. __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
  50575. \
  50576. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50577. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50578. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50579. __ret; \
  50580. })
  50581. #endif
  50582. #ifdef __LITTLE_ENDIAN__
  50583. #define vld3q_dup_p64(__p0) __extension__ ({ \
  50584. poly64x2x3_t __ret; \
  50585. __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
  50586. __ret; \
  50587. })
  50588. #else
  50589. #define vld3q_dup_p64(__p0) __extension__ ({ \
  50590. poly64x2x3_t __ret; \
  50591. __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
  50592. \
  50593. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50594. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50595. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50596. __ret; \
  50597. })
  50598. #endif
  50599. #ifdef __LITTLE_ENDIAN__
  50600. #define vld3q_dup_p16(__p0) __extension__ ({ \
  50601. poly16x8x3_t __ret; \
  50602. __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
  50603. __ret; \
  50604. })
  50605. #else
  50606. #define vld3q_dup_p16(__p0) __extension__ ({ \
  50607. poly16x8x3_t __ret; \
  50608. __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
  50609. \
  50610. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  50611. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  50612. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  50613. __ret; \
  50614. })
  50615. #endif
  50616. #ifdef __LITTLE_ENDIAN__
  50617. #define vld3q_dup_u8(__p0) __extension__ ({ \
  50618. uint8x16x3_t __ret; \
  50619. __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
  50620. __ret; \
  50621. })
  50622. #else
  50623. #define vld3q_dup_u8(__p0) __extension__ ({ \
  50624. uint8x16x3_t __ret; \
  50625. __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
  50626. \
  50627. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50628. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50629. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50630. __ret; \
  50631. })
  50632. #endif
  50633. #ifdef __LITTLE_ENDIAN__
  50634. #define vld3q_dup_u32(__p0) __extension__ ({ \
  50635. uint32x4x3_t __ret; \
  50636. __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
  50637. __ret; \
  50638. })
  50639. #else
  50640. #define vld3q_dup_u32(__p0) __extension__ ({ \
  50641. uint32x4x3_t __ret; \
  50642. __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
  50643. \
  50644. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  50645. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  50646. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  50647. __ret; \
  50648. })
  50649. #endif
  50650. #ifdef __LITTLE_ENDIAN__
  50651. #define vld3q_dup_u64(__p0) __extension__ ({ \
  50652. uint64x2x3_t __ret; \
  50653. __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
  50654. __ret; \
  50655. })
  50656. #else
  50657. #define vld3q_dup_u64(__p0) __extension__ ({ \
  50658. uint64x2x3_t __ret; \
  50659. __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
  50660. \
  50661. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50662. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50663. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50664. __ret; \
  50665. })
  50666. #endif
  50667. #ifdef __LITTLE_ENDIAN__
  50668. #define vld3q_dup_u16(__p0) __extension__ ({ \
  50669. uint16x8x3_t __ret; \
  50670. __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
  50671. __ret; \
  50672. })
  50673. #else
  50674. #define vld3q_dup_u16(__p0) __extension__ ({ \
  50675. uint16x8x3_t __ret; \
  50676. __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
  50677. \
  50678. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  50679. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  50680. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  50681. __ret; \
  50682. })
  50683. #endif
  50684. #ifdef __LITTLE_ENDIAN__
  50685. #define vld3q_dup_s8(__p0) __extension__ ({ \
  50686. int8x16x3_t __ret; \
  50687. __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
  50688. __ret; \
  50689. })
  50690. #else
  50691. #define vld3q_dup_s8(__p0) __extension__ ({ \
  50692. int8x16x3_t __ret; \
  50693. __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
  50694. \
  50695. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50696. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50697. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50698. __ret; \
  50699. })
  50700. #endif
  50701. #ifdef __LITTLE_ENDIAN__
  50702. #define vld3q_dup_f64(__p0) __extension__ ({ \
  50703. float64x2x3_t __ret; \
  50704. __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
  50705. __ret; \
  50706. })
  50707. #else
  50708. #define vld3q_dup_f64(__p0) __extension__ ({ \
  50709. float64x2x3_t __ret; \
  50710. __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
  50711. \
  50712. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50713. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50714. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50715. __ret; \
  50716. })
  50717. #endif
  50718. #ifdef __LITTLE_ENDIAN__
  50719. #define vld3q_dup_f32(__p0) __extension__ ({ \
  50720. float32x4x3_t __ret; \
  50721. __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
  50722. __ret; \
  50723. })
  50724. #else
  50725. #define vld3q_dup_f32(__p0) __extension__ ({ \
  50726. float32x4x3_t __ret; \
  50727. __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
  50728. \
  50729. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  50730. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  50731. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  50732. __ret; \
  50733. })
  50734. #endif
  50735. #ifdef __LITTLE_ENDIAN__
  50736. #define vld3q_dup_f16(__p0) __extension__ ({ \
  50737. float16x8x3_t __ret; \
  50738. __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
  50739. __ret; \
  50740. })
  50741. #else
  50742. #define vld3q_dup_f16(__p0) __extension__ ({ \
  50743. float16x8x3_t __ret; \
  50744. __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
  50745. \
  50746. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  50747. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  50748. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  50749. __ret; \
  50750. })
  50751. #endif
  50752. #ifdef __LITTLE_ENDIAN__
  50753. #define vld3q_dup_s32(__p0) __extension__ ({ \
  50754. int32x4x3_t __ret; \
  50755. __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
  50756. __ret; \
  50757. })
  50758. #else
  50759. #define vld3q_dup_s32(__p0) __extension__ ({ \
  50760. int32x4x3_t __ret; \
  50761. __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
  50762. \
  50763. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  50764. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  50765. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  50766. __ret; \
  50767. })
  50768. #endif
  50769. #ifdef __LITTLE_ENDIAN__
  50770. #define vld3q_dup_s64(__p0) __extension__ ({ \
  50771. int64x2x3_t __ret; \
  50772. __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
  50773. __ret; \
  50774. })
  50775. #else
  50776. #define vld3q_dup_s64(__p0) __extension__ ({ \
  50777. int64x2x3_t __ret; \
  50778. __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
  50779. \
  50780. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50781. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50782. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50783. __ret; \
  50784. })
  50785. #endif
  50786. #ifdef __LITTLE_ENDIAN__
  50787. #define vld3q_dup_s16(__p0) __extension__ ({ \
  50788. int16x8x3_t __ret; \
  50789. __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
  50790. __ret; \
  50791. })
  50792. #else
  50793. #define vld3q_dup_s16(__p0) __extension__ ({ \
  50794. int16x8x3_t __ret; \
  50795. __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
  50796. \
  50797. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  50798. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  50799. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  50800. __ret; \
  50801. })
  50802. #endif
  50803. #ifdef __LITTLE_ENDIAN__
  50804. #define vld3_dup_f64(__p0) __extension__ ({ \
  50805. float64x1x3_t __ret; \
  50806. __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
  50807. __ret; \
  50808. })
  50809. #else
  50810. #define vld3_dup_f64(__p0) __extension__ ({ \
  50811. float64x1x3_t __ret; \
  50812. __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
  50813. __ret; \
  50814. })
  50815. #endif
  50816. #ifdef __LITTLE_ENDIAN__
  50817. #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  50818. poly64x1x3_t __s1 = __p1; \
  50819. poly64x1x3_t __ret; \
  50820. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
  50821. __ret; \
  50822. })
  50823. #else
  50824. #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  50825. poly64x1x3_t __s1 = __p1; \
  50826. poly64x1x3_t __ret; \
  50827. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
  50828. __ret; \
  50829. })
  50830. #endif
  50831. #ifdef __LITTLE_ENDIAN__
  50832. #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  50833. poly8x16x3_t __s1 = __p1; \
  50834. poly8x16x3_t __ret; \
  50835. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
  50836. __ret; \
  50837. })
  50838. #else
  50839. #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  50840. poly8x16x3_t __s1 = __p1; \
  50841. poly8x16x3_t __rev1; \
  50842. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50843. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50844. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50845. poly8x16x3_t __ret; \
  50846. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
  50847. \
  50848. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50849. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50850. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50851. __ret; \
  50852. })
  50853. #endif
  50854. #ifdef __LITTLE_ENDIAN__
  50855. #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  50856. poly64x2x3_t __s1 = __p1; \
  50857. poly64x2x3_t __ret; \
  50858. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
  50859. __ret; \
  50860. })
  50861. #else
  50862. #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  50863. poly64x2x3_t __s1 = __p1; \
  50864. poly64x2x3_t __rev1; \
  50865. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  50866. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  50867. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  50868. poly64x2x3_t __ret; \
  50869. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
  50870. \
  50871. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50872. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50873. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50874. __ret; \
  50875. })
  50876. #endif
  50877. #ifdef __LITTLE_ENDIAN__
  50878. #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  50879. uint8x16x3_t __s1 = __p1; \
  50880. uint8x16x3_t __ret; \
  50881. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
  50882. __ret; \
  50883. })
  50884. #else
  50885. #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  50886. uint8x16x3_t __s1 = __p1; \
  50887. uint8x16x3_t __rev1; \
  50888. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50889. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50890. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50891. uint8x16x3_t __ret; \
  50892. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
  50893. \
  50894. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50895. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50896. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50897. __ret; \
  50898. })
  50899. #endif
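/* Illustrative usage sketch (editorial addition; the helper name and the
 * fixed lane index are assumptions). vld3q_lane_u8 loads one interleaved
 * 3-byte group from memory and inserts its components into the same lane of
 * the three vectors passed in, leaving every other lane untouched:
 *
 *   #include <arm_neon.h>
 *
 *   static inline uint8x16x3_t patch_rgb_lane4(const uint8_t *rgb_pixel,
 *                                              uint8x16x3_t planes)
 *   {
 *       // Overwrite lane 4 of planes.val[0..2] with rgb_pixel[0..2]; the
 *       // lane number must be a compile-time constant in the range 0..15.
 *       return vld3q_lane_u8(rgb_pixel, planes, 4);
 *   }
 */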
  50900. #ifdef __LITTLE_ENDIAN__
  50901. #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  50902. uint64x2x3_t __s1 = __p1; \
  50903. uint64x2x3_t __ret; \
  50904. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
  50905. __ret; \
  50906. })
  50907. #else
  50908. #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  50909. uint64x2x3_t __s1 = __p1; \
  50910. uint64x2x3_t __rev1; \
  50911. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  50912. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  50913. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  50914. uint64x2x3_t __ret; \
  50915. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
  50916. \
  50917. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50918. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50919. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50920. __ret; \
  50921. })
  50922. #endif
  50923. #ifdef __LITTLE_ENDIAN__
  50924. #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  50925. int8x16x3_t __s1 = __p1; \
  50926. int8x16x3_t __ret; \
  50927. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
  50928. __ret; \
  50929. })
  50930. #else
  50931. #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  50932. int8x16x3_t __s1 = __p1; \
  50933. int8x16x3_t __rev1; \
  50934. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50935. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50936. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50937. int8x16x3_t __ret; \
  50938. __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
  50939. \
  50940. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50941. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50942. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  50943. __ret; \
  50944. })
  50945. #endif
  50946. #ifdef __LITTLE_ENDIAN__
  50947. #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  50948. float64x2x3_t __s1 = __p1; \
  50949. float64x2x3_t __ret; \
  50950. __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
  50951. __ret; \
  50952. })
  50953. #else
  50954. #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  50955. float64x2x3_t __s1 = __p1; \
  50956. float64x2x3_t __rev1; \
  50957. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  50958. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  50959. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  50960. float64x2x3_t __ret; \
  50961. __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
  50962. \
  50963. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50964. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50965. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50966. __ret; \
  50967. })
  50968. #endif
  50969. #ifdef __LITTLE_ENDIAN__
  50970. #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  50971. int64x2x3_t __s1 = __p1; \
  50972. int64x2x3_t __ret; \
  50973. __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
  50974. __ret; \
  50975. })
  50976. #else
  50977. #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  50978. int64x2x3_t __s1 = __p1; \
  50979. int64x2x3_t __rev1; \
  50980. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  50981. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  50982. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  50983. int64x2x3_t __ret; \
  50984. __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
  50985. \
  50986. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  50987. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  50988. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  50989. __ret; \
  50990. })
  50991. #endif
  50992. #ifdef __LITTLE_ENDIAN__
  50993. #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  50994. uint64x1x3_t __s1 = __p1; \
  50995. uint64x1x3_t __ret; \
  50996. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
  50997. __ret; \
  50998. })
  50999. #else
  51000. #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  51001. uint64x1x3_t __s1 = __p1; \
  51002. uint64x1x3_t __ret; \
  51003. __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
  51004. __ret; \
  51005. })
  51006. #endif
  51007. #ifdef __LITTLE_ENDIAN__
  51008. #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  51009. float64x1x3_t __s1 = __p1; \
  51010. float64x1x3_t __ret; \
  51011. __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
  51012. __ret; \
  51013. })
  51014. #else
  51015. #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  51016. float64x1x3_t __s1 = __p1; \
  51017. float64x1x3_t __ret; \
  51018. __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
  51019. __ret; \
  51020. })
  51021. #endif
  51022. #ifdef __LITTLE_ENDIAN__
  51023. #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  51024. int64x1x3_t __s1 = __p1; \
  51025. int64x1x3_t __ret; \
  51026. __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
  51027. __ret; \
  51028. })
  51029. #else
  51030. #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  51031. int64x1x3_t __s1 = __p1; \
  51032. int64x1x3_t __ret; \
  51033. __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
  51034. __ret; \
  51035. })
  51036. #endif
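/* Illustrative usage sketch (editorial addition; the helper name is an
 * assumption). For the 64-bit x1 forms the only valid lane index is 0, so
 * vld3_lane_s64 effectively refills all three single-lane vectors from three
 * consecutive int64_t values while keeping the x3 structure type:
 *
 *   #include <arm_neon.h>
 *
 *   static inline int64x1x3_t reload_s64_triple(const int64_t *p,
 *                                               int64x1x3_t old)
 *   {
 *       // Lane 0 is the only lane of an int64x1_t, so this replaces the
 *       // entire contents of old.val[0..2] with p[0..2].
 *       return vld3_lane_s64(p, old, 0);
 *   }
 */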
  51037. #ifdef __LITTLE_ENDIAN__
  51038. #define vld4_p64(__p0) __extension__ ({ \
  51039. poly64x1x4_t __ret; \
  51040. __builtin_neon_vld4_v(&__ret, __p0, 6); \
  51041. __ret; \
  51042. })
  51043. #else
  51044. #define vld4_p64(__p0) __extension__ ({ \
  51045. poly64x1x4_t __ret; \
  51046. __builtin_neon_vld4_v(&__ret, __p0, 6); \
  51047. __ret; \
  51048. })
  51049. #endif
  51050. #ifdef __LITTLE_ENDIAN__
  51051. #define vld4q_p64(__p0) __extension__ ({ \
  51052. poly64x2x4_t __ret; \
  51053. __builtin_neon_vld4q_v(&__ret, __p0, 38); \
  51054. __ret; \
  51055. })
  51056. #else
  51057. #define vld4q_p64(__p0) __extension__ ({ \
  51058. poly64x2x4_t __ret; \
  51059. __builtin_neon_vld4q_v(&__ret, __p0, 38); \
  51060. \
  51061. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51062. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51063. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51064. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51065. __ret; \
  51066. })
  51067. #endif
  51068. #ifdef __LITTLE_ENDIAN__
  51069. #define vld4q_u64(__p0) __extension__ ({ \
  51070. uint64x2x4_t __ret; \
  51071. __builtin_neon_vld4q_v(&__ret, __p0, 51); \
  51072. __ret; \
  51073. })
  51074. #else
  51075. #define vld4q_u64(__p0) __extension__ ({ \
  51076. uint64x2x4_t __ret; \
  51077. __builtin_neon_vld4q_v(&__ret, __p0, 51); \
  51078. \
  51079. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51080. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51081. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51082. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51083. __ret; \
  51084. })
  51085. #endif
  51086. #ifdef __LITTLE_ENDIAN__
  51087. #define vld4q_f64(__p0) __extension__ ({ \
  51088. float64x2x4_t __ret; \
  51089. __builtin_neon_vld4q_v(&__ret, __p0, 42); \
  51090. __ret; \
  51091. })
  51092. #else
  51093. #define vld4q_f64(__p0) __extension__ ({ \
  51094. float64x2x4_t __ret; \
  51095. __builtin_neon_vld4q_v(&__ret, __p0, 42); \
  51096. \
  51097. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51098. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51099. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51100. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51101. __ret; \
  51102. })
  51103. #endif
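/* Illustrative usage sketch (editorial addition; the helper name and record
 * layout are assumptions). vld4q_f64 reads eight doubles laid out as two
 * consecutive {x, y, z, w} records and de-interleaves them, so val[0] holds
 * both x components, val[1] both y components, and so on:
 *
 *   #include <arm_neon.h>
 *
 *   static inline float64x2x4_t load_two_xyzw(const double *records)
 *   {
 *       // records must point at 2 * 4 contiguous doubles:
 *       //   x0 y0 z0 w0 x1 y1 z1 w1
 *       return vld4q_f64(records);
 *   }
 */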
  51104. #ifdef __LITTLE_ENDIAN__
  51105. #define vld4q_s64(__p0) __extension__ ({ \
  51106. int64x2x4_t __ret; \
  51107. __builtin_neon_vld4q_v(&__ret, __p0, 35); \
  51108. __ret; \
  51109. })
  51110. #else
  51111. #define vld4q_s64(__p0) __extension__ ({ \
  51112. int64x2x4_t __ret; \
  51113. __builtin_neon_vld4q_v(&__ret, __p0, 35); \
  51114. \
  51115. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51116. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51117. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51118. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51119. __ret; \
  51120. })
  51121. #endif
  51122. #ifdef __LITTLE_ENDIAN__
  51123. #define vld4_f64(__p0) __extension__ ({ \
  51124. float64x1x4_t __ret; \
  51125. __builtin_neon_vld4_v(&__ret, __p0, 10); \
  51126. __ret; \
  51127. })
  51128. #else
  51129. #define vld4_f64(__p0) __extension__ ({ \
  51130. float64x1x4_t __ret; \
  51131. __builtin_neon_vld4_v(&__ret, __p0, 10); \
  51132. __ret; \
  51133. })
  51134. #endif
  51135. #ifdef __LITTLE_ENDIAN__
  51136. #define vld4_dup_p64(__p0) __extension__ ({ \
  51137. poly64x1x4_t __ret; \
  51138. __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
  51139. __ret; \
  51140. })
  51141. #else
  51142. #define vld4_dup_p64(__p0) __extension__ ({ \
  51143. poly64x1x4_t __ret; \
  51144. __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
  51145. __ret; \
  51146. })
  51147. #endif
  51148. #ifdef __LITTLE_ENDIAN__
  51149. #define vld4q_dup_p8(__p0) __extension__ ({ \
  51150. poly8x16x4_t __ret; \
  51151. __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
  51152. __ret; \
  51153. })
  51154. #else
  51155. #define vld4q_dup_p8(__p0) __extension__ ({ \
  51156. poly8x16x4_t __ret; \
  51157. __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
  51158. \
  51159. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51160. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51161. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51162. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51163. __ret; \
  51164. })
  51165. #endif
  51166. #ifdef __LITTLE_ENDIAN__
  51167. #define vld4q_dup_p64(__p0) __extension__ ({ \
  51168. poly64x2x4_t __ret; \
  51169. __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
  51170. __ret; \
  51171. })
  51172. #else
  51173. #define vld4q_dup_p64(__p0) __extension__ ({ \
  51174. poly64x2x4_t __ret; \
  51175. __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
  51176. \
  51177. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51178. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51179. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51180. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51181. __ret; \
  51182. })
  51183. #endif
  51184. #ifdef __LITTLE_ENDIAN__
  51185. #define vld4q_dup_p16(__p0) __extension__ ({ \
  51186. poly16x8x4_t __ret; \
  51187. __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
  51188. __ret; \
  51189. })
  51190. #else
  51191. #define vld4q_dup_p16(__p0) __extension__ ({ \
  51192. poly16x8x4_t __ret; \
  51193. __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
  51194. \
  51195. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  51196. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  51197. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  51198. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  51199. __ret; \
  51200. })
  51201. #endif
  51202. #ifdef __LITTLE_ENDIAN__
  51203. #define vld4q_dup_u8(__p0) __extension__ ({ \
  51204. uint8x16x4_t __ret; \
  51205. __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
  51206. __ret; \
  51207. })
  51208. #else
  51209. #define vld4q_dup_u8(__p0) __extension__ ({ \
  51210. uint8x16x4_t __ret; \
  51211. __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
  51212. \
  51213. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51214. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51215. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51216. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51217. __ret; \
  51218. })
  51219. #endif
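/* Illustrative usage sketch (editorial addition; the helper name is an
 * assumption). vld4q_dup_u8 reads four consecutive bytes, for example one
 * RGBA pixel, and splats each byte across its own 16-lane vector, which is a
 * convenient way to prepare a solid fill colour for wide blends:
 *
 *   #include <arm_neon.h>
 *
 *   static inline uint8x16x4_t splat_rgba(const uint8_t *rgba)
 *   {
 *       // val[0] = 16 copies of rgba[0] (R), val[1] = 16 copies of rgba[1]
 *       // (G), and likewise for B and A.
 *       return vld4q_dup_u8(rgba);
 *   }
 */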
  51220. #ifdef __LITTLE_ENDIAN__
  51221. #define vld4q_dup_u32(__p0) __extension__ ({ \
  51222. uint32x4x4_t __ret; \
  51223. __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
  51224. __ret; \
  51225. })
  51226. #else
  51227. #define vld4q_dup_u32(__p0) __extension__ ({ \
  51228. uint32x4x4_t __ret; \
  51229. __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
  51230. \
  51231. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  51232. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  51233. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  51234. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  51235. __ret; \
  51236. })
  51237. #endif
  51238. #ifdef __LITTLE_ENDIAN__
  51239. #define vld4q_dup_u64(__p0) __extension__ ({ \
  51240. uint64x2x4_t __ret; \
  51241. __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
  51242. __ret; \
  51243. })
  51244. #else
  51245. #define vld4q_dup_u64(__p0) __extension__ ({ \
  51246. uint64x2x4_t __ret; \
  51247. __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
  51248. \
  51249. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51250. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51251. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51252. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51253. __ret; \
  51254. })
  51255. #endif
  51256. #ifdef __LITTLE_ENDIAN__
  51257. #define vld4q_dup_u16(__p0) __extension__ ({ \
  51258. uint16x8x4_t __ret; \
  51259. __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
  51260. __ret; \
  51261. })
  51262. #else
  51263. #define vld4q_dup_u16(__p0) __extension__ ({ \
  51264. uint16x8x4_t __ret; \
  51265. __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
  51266. \
  51267. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  51268. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  51269. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  51270. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  51271. __ret; \
  51272. })
  51273. #endif
  51274. #ifdef __LITTLE_ENDIAN__
  51275. #define vld4q_dup_s8(__p0) __extension__ ({ \
  51276. int8x16x4_t __ret; \
  51277. __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
  51278. __ret; \
  51279. })
  51280. #else
  51281. #define vld4q_dup_s8(__p0) __extension__ ({ \
  51282. int8x16x4_t __ret; \
  51283. __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
  51284. \
  51285. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51286. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51287. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51288. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51289. __ret; \
  51290. })
  51291. #endif
  51292. #ifdef __LITTLE_ENDIAN__
  51293. #define vld4q_dup_f64(__p0) __extension__ ({ \
  51294. float64x2x4_t __ret; \
  51295. __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
  51296. __ret; \
  51297. })
  51298. #else
  51299. #define vld4q_dup_f64(__p0) __extension__ ({ \
  51300. float64x2x4_t __ret; \
  51301. __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
  51302. \
  51303. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51304. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51305. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51306. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51307. __ret; \
  51308. })
  51309. #endif
  51310. #ifdef __LITTLE_ENDIAN__
  51311. #define vld4q_dup_f32(__p0) __extension__ ({ \
  51312. float32x4x4_t __ret; \
  51313. __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
  51314. __ret; \
  51315. })
  51316. #else
  51317. #define vld4q_dup_f32(__p0) __extension__ ({ \
  51318. float32x4x4_t __ret; \
  51319. __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
  51320. \
  51321. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  51322. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  51323. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  51324. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  51325. __ret; \
  51326. })
  51327. #endif
  51328. #ifdef __LITTLE_ENDIAN__
  51329. #define vld4q_dup_f16(__p0) __extension__ ({ \
  51330. float16x8x4_t __ret; \
  51331. __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
  51332. __ret; \
  51333. })
  51334. #else
  51335. #define vld4q_dup_f16(__p0) __extension__ ({ \
  51336. float16x8x4_t __ret; \
  51337. __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
  51338. \
  51339. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  51340. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  51341. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  51342. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  51343. __ret; \
  51344. })
  51345. #endif
  51346. #ifdef __LITTLE_ENDIAN__
  51347. #define vld4q_dup_s32(__p0) __extension__ ({ \
  51348. int32x4x4_t __ret; \
  51349. __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
  51350. __ret; \
  51351. })
  51352. #else
  51353. #define vld4q_dup_s32(__p0) __extension__ ({ \
  51354. int32x4x4_t __ret; \
  51355. __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
  51356. \
  51357. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
  51358. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
  51359. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
  51360. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
  51361. __ret; \
  51362. })
  51363. #endif
  51364. #ifdef __LITTLE_ENDIAN__
  51365. #define vld4q_dup_s64(__p0) __extension__ ({ \
  51366. int64x2x4_t __ret; \
  51367. __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
  51368. __ret; \
  51369. })
  51370. #else
  51371. #define vld4q_dup_s64(__p0) __extension__ ({ \
  51372. int64x2x4_t __ret; \
  51373. __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
  51374. \
  51375. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51376. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51377. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51378. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51379. __ret; \
  51380. })
  51381. #endif
  51382. #ifdef __LITTLE_ENDIAN__
  51383. #define vld4q_dup_s16(__p0) __extension__ ({ \
  51384. int16x8x4_t __ret; \
  51385. __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
  51386. __ret; \
  51387. })
  51388. #else
  51389. #define vld4q_dup_s16(__p0) __extension__ ({ \
  51390. int16x8x4_t __ret; \
  51391. __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
  51392. \
  51393. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  51394. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  51395. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  51396. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  51397. __ret; \
  51398. })
  51399. #endif
  51400. #ifdef __LITTLE_ENDIAN__
  51401. #define vld4_dup_f64(__p0) __extension__ ({ \
  51402. float64x1x4_t __ret; \
  51403. __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
  51404. __ret; \
  51405. })
  51406. #else
  51407. #define vld4_dup_f64(__p0) __extension__ ({ \
  51408. float64x1x4_t __ret; \
  51409. __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
  51410. __ret; \
  51411. })
  51412. #endif
  51413. #ifdef __LITTLE_ENDIAN__
  51414. #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  51415. poly64x1x4_t __s1 = __p1; \
  51416. poly64x1x4_t __ret; \
  51417. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
  51418. __ret; \
  51419. })
  51420. #else
  51421. #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  51422. poly64x1x4_t __s1 = __p1; \
  51423. poly64x1x4_t __ret; \
  51424. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
  51425. __ret; \
  51426. })
  51427. #endif
  51428. #ifdef __LITTLE_ENDIAN__
  51429. #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  51430. poly8x16x4_t __s1 = __p1; \
  51431. poly8x16x4_t __ret; \
  51432. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
  51433. __ret; \
  51434. })
  51435. #else
  51436. #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  51437. poly8x16x4_t __s1 = __p1; \
  51438. poly8x16x4_t __rev1; \
  51439. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51440. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51441. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51442. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51443. poly8x16x4_t __ret; \
  51444. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
  51445. \
  51446. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51447. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51448. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51449. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51450. __ret; \
  51451. })
  51452. #endif
  51453. #ifdef __LITTLE_ENDIAN__
  51454. #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  51455. poly64x2x4_t __s1 = __p1; \
  51456. poly64x2x4_t __ret; \
  51457. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
  51458. __ret; \
  51459. })
  51460. #else
  51461. #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  51462. poly64x2x4_t __s1 = __p1; \
  51463. poly64x2x4_t __rev1; \
  51464. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  51465. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  51466. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  51467. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  51468. poly64x2x4_t __ret; \
  51469. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
  51470. \
  51471. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51472. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51473. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51474. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51475. __ret; \
  51476. })
  51477. #endif
  51478. #ifdef __LITTLE_ENDIAN__
  51479. #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  51480. uint8x16x4_t __s1 = __p1; \
  51481. uint8x16x4_t __ret; \
  51482. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
  51483. __ret; \
  51484. })
  51485. #else
  51486. #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  51487. uint8x16x4_t __s1 = __p1; \
  51488. uint8x16x4_t __rev1; \
  51489. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51490. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51491. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51492. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51493. uint8x16x4_t __ret; \
  51494. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
  51495. \
  51496. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51497. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51498. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51499. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51500. __ret; \
  51501. })
  51502. #endif
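/* Illustrative usage sketch (editorial addition; the helper name and lane
 * index are assumptions). vld4q_lane_u8 merges a single interleaved 4-byte
 * group into one chosen lane of four already-loaded vectors, which is useful
 * when patching a single pixel into de-interleaved RGBA planes:
 *
 *   #include <arm_neon.h>
 *
 *   static inline uint8x16x4_t patch_rgba_lane0(const uint8_t *rgba_pixel,
 *                                               uint8x16x4_t planes)
 *   {
 *       // Only lane 0 of each of planes.val[0..3] is replaced; the lane
 *       // index must be a compile-time constant in 0..15.
 *       return vld4q_lane_u8(rgba_pixel, planes, 0);
 *   }
 */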
  51503. #ifdef __LITTLE_ENDIAN__
  51504. #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  51505. uint64x2x4_t __s1 = __p1; \
  51506. uint64x2x4_t __ret; \
  51507. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
  51508. __ret; \
  51509. })
  51510. #else
  51511. #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  51512. uint64x2x4_t __s1 = __p1; \
  51513. uint64x2x4_t __rev1; \
  51514. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  51515. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  51516. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  51517. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  51518. uint64x2x4_t __ret; \
  51519. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
  51520. \
  51521. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51522. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51523. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51524. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51525. __ret; \
  51526. })
  51527. #endif
  51528. #ifdef __LITTLE_ENDIAN__
  51529. #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  51530. int8x16x4_t __s1 = __p1; \
  51531. int8x16x4_t __ret; \
  51532. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
  51533. __ret; \
  51534. })
  51535. #else
  51536. #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  51537. int8x16x4_t __s1 = __p1; \
  51538. int8x16x4_t __rev1; \
  51539. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51540. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51541. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51542. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51543. int8x16x4_t __ret; \
  51544. __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
  51545. \
  51546. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51547. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51548. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51549. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  51550. __ret; \
  51551. })
  51552. #endif
  51553. #ifdef __LITTLE_ENDIAN__
  51554. #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  51555. float64x2x4_t __s1 = __p1; \
  51556. float64x2x4_t __ret; \
  51557. __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
  51558. __ret; \
  51559. })
  51560. #else
  51561. #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  51562. float64x2x4_t __s1 = __p1; \
  51563. float64x2x4_t __rev1; \
  51564. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  51565. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  51566. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  51567. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  51568. float64x2x4_t __ret; \
  51569. __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
  51570. \
  51571. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51572. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51573. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51574. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51575. __ret; \
  51576. })
  51577. #endif
  51578. #ifdef __LITTLE_ENDIAN__
  51579. #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  51580. int64x2x4_t __s1 = __p1; \
  51581. int64x2x4_t __ret; \
  51582. __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
  51583. __ret; \
  51584. })
  51585. #else
  51586. #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  51587. int64x2x4_t __s1 = __p1; \
  51588. int64x2x4_t __rev1; \
  51589. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  51590. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  51591. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  51592. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  51593. int64x2x4_t __ret; \
  51594. __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
  51595. \
  51596. __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
  51597. __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
  51598. __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
  51599. __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
  51600. __ret; \
  51601. })
  51602. #endif
  51603. #ifdef __LITTLE_ENDIAN__
  51604. #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  51605. uint64x1x4_t __s1 = __p1; \
  51606. uint64x1x4_t __ret; \
  51607. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
  51608. __ret; \
  51609. })
  51610. #else
  51611. #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  51612. uint64x1x4_t __s1 = __p1; \
  51613. uint64x1x4_t __ret; \
  51614. __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
  51615. __ret; \
  51616. })
  51617. #endif
  51618. #ifdef __LITTLE_ENDIAN__
  51619. #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  51620. float64x1x4_t __s1 = __p1; \
  51621. float64x1x4_t __ret; \
  51622. __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
  51623. __ret; \
  51624. })
  51625. #else
  51626. #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  51627. float64x1x4_t __s1 = __p1; \
  51628. float64x1x4_t __ret; \
  51629. __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
  51630. __ret; \
  51631. })
  51632. #endif
  51633. #ifdef __LITTLE_ENDIAN__
  51634. #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  51635. int64x1x4_t __s1 = __p1; \
  51636. int64x1x4_t __ret; \
  51637. __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
  51638. __ret; \
  51639. })
  51640. #else
  51641. #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  51642. int64x1x4_t __s1 = __p1; \
  51643. int64x1x4_t __ret; \
  51644. __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
  51645. __ret; \
  51646. })
  51647. #endif
  51648. #ifdef __LITTLE_ENDIAN__
  51649. #define vldrq_p128(__p0) __extension__ ({ \
  51650. poly128_t __ret; \
  51651. __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
  51652. __ret; \
  51653. })
  51654. #else
  51655. #define vldrq_p128(__p0) __extension__ ({ \
  51656. poly128_t __ret; \
  51657. __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
  51658. __ret; \
  51659. })
  51660. #endif
  51661. #ifdef __LITTLE_ENDIAN__
  51662. __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
  51663. float64x2_t __ret;
  51664. __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  51665. return __ret;
  51666. }
  51667. #else
  51668. __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
  51669. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51670. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  51671. float64x2_t __ret;
  51672. __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  51673. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  51674. return __ret;
  51675. }
  51676. #endif
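/* Illustrative usage sketch (editorial addition; the helper name is an
 * assumption). vmaxq_f64 is a lane-wise maximum of two float64x2_t vectors,
 * so combining it with a broadcast constant gives a simple lower clamp:
 *
 *   #include <arm_neon.h>
 *
 *   static inline float64x2_t clamp_below(float64x2_t v, double floor_value)
 *   {
 *       // Each lane becomes max(v[i], floor_value).
 *       return vmaxq_f64(v, vdupq_n_f64(floor_value));
 *   }
 */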
  51677. #ifdef __LITTLE_ENDIAN__
  51678. __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
  51679. float64x1_t __ret;
  51680. __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  51681. return __ret;
  51682. }
  51683. #else
  51684. __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
  51685. float64x1_t __ret;
  51686. __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  51687. return __ret;
  51688. }
  51689. #endif
  51690. #ifdef __LITTLE_ENDIAN__
  51691. __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
  51692. float64_t __ret;
  51693. __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0);
  51694. return __ret;
  51695. }
  51696. #else
  51697. __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
  51698. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51699. float64_t __ret;
  51700. __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0);
  51701. return __ret;
  51702. }
  51703. #endif
  51704. #ifdef __LITTLE_ENDIAN__
  51705. __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
  51706. float32_t __ret;
  51707. __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0);
  51708. return __ret;
  51709. }
  51710. #else
  51711. __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
  51712. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  51713. float32_t __ret;
  51714. __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0);
  51715. return __ret;
  51716. }
  51717. #endif
  51718. #ifdef __LITTLE_ENDIAN__
  51719. __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
  51720. float32_t __ret;
  51721. __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0);
  51722. return __ret;
  51723. }
  51724. #else
  51725. __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
  51726. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51727. float32_t __ret;
  51728. __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0);
  51729. return __ret;
  51730. }
  51731. #endif
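/* Illustrative usage sketch (editorial addition; the helper name is an
 * assumption). The vmaxnmv and vminnmv reductions follow the IEEE maxNum and
 * minNum convention, so a quiet NaN in one lane is ignored as long as some
 * other lane holds an ordinary number:
 *
 *   #include <arm_neon.h>
 *
 *   static inline float nan_tolerant_max4(float32x4_t v)
 *   {
 *       // Returns the largest non-NaN lane of v (NaN only if every lane
 *       // is NaN).
 *       return vmaxnmvq_f32(v);
 *   }
 */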
  51732. #ifdef __LITTLE_ENDIAN__
  51733. __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
  51734. uint8_t __ret;
  51735. __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0);
  51736. return __ret;
  51737. }
  51738. #else
  51739. __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
  51740. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  51741. uint8_t __ret;
  51742. __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0);
  51743. return __ret;
  51744. }
  51745. #endif
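/* Illustrative usage sketch (editorial addition; the helper name is an
 * assumption and the buffer length is assumed to be a multiple of 16).
 * vmaxvq_u8 reduces one uint8x16_t to the maximum of its 16 lanes, so a
 * running vector maximum followed by one final reduction finds the largest
 * byte in a buffer:
 *
 *   #include <arm_neon.h>
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   static inline uint8_t max_byte(const uint8_t *p, size_t n)
 *   {
 *       uint8x16_t acc = vdupq_n_u8(0);
 *       for (size_t i = 0; i < n; i += 16)
 *           acc = vmaxq_u8(acc, vld1q_u8(p + i));   // lane-wise running max
 *       return vmaxvq_u8(acc);                      // horizontal reduction
 *   }
 */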
  51746. #ifdef __LITTLE_ENDIAN__
  51747. __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
  51748. uint32_t __ret;
  51749. __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0);
  51750. return __ret;
  51751. }
  51752. #else
  51753. __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
  51754. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  51755. uint32_t __ret;
  51756. __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0);
  51757. return __ret;
  51758. }
  51759. #endif
  51760. #ifdef __LITTLE_ENDIAN__
  51761. __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
  51762. uint16_t __ret;
  51763. __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0);
  51764. return __ret;
  51765. }
  51766. #else
  51767. __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
  51768. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  51769. uint16_t __ret;
  51770. __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0);
  51771. return __ret;
  51772. }
  51773. #endif
  51774. #ifdef __LITTLE_ENDIAN__
  51775. __ai int8_t vmaxvq_s8(int8x16_t __p0) {
  51776. int8_t __ret;
  51777. __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0);
  51778. return __ret;
  51779. }
  51780. #else
  51781. __ai int8_t vmaxvq_s8(int8x16_t __p0) {
  51782. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  51783. int8_t __ret;
  51784. __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0);
  51785. return __ret;
  51786. }
  51787. #endif
  51788. #ifdef __LITTLE_ENDIAN__
  51789. __ai float64_t vmaxvq_f64(float64x2_t __p0) {
  51790. float64_t __ret;
  51791. __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0);
  51792. return __ret;
  51793. }
  51794. #else
  51795. __ai float64_t vmaxvq_f64(float64x2_t __p0) {
  51796. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51797. float64_t __ret;
  51798. __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0);
  51799. return __ret;
  51800. }
  51801. #endif
  51802. #ifdef __LITTLE_ENDIAN__
  51803. __ai float32_t vmaxvq_f32(float32x4_t __p0) {
  51804. float32_t __ret;
  51805. __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0);
  51806. return __ret;
  51807. }
  51808. #else
  51809. __ai float32_t vmaxvq_f32(float32x4_t __p0) {
  51810. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  51811. float32_t __ret;
  51812. __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0);
  51813. return __ret;
  51814. }
  51815. #endif
  51816. #ifdef __LITTLE_ENDIAN__
  51817. __ai int32_t vmaxvq_s32(int32x4_t __p0) {
  51818. int32_t __ret;
  51819. __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0);
  51820. return __ret;
  51821. }
  51822. #else
  51823. __ai int32_t vmaxvq_s32(int32x4_t __p0) {
  51824. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  51825. int32_t __ret;
  51826. __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0);
  51827. return __ret;
  51828. }
  51829. #endif
  51830. #ifdef __LITTLE_ENDIAN__
  51831. __ai int16_t vmaxvq_s16(int16x8_t __p0) {
  51832. int16_t __ret;
  51833. __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0);
  51834. return __ret;
  51835. }
  51836. #else
  51837. __ai int16_t vmaxvq_s16(int16x8_t __p0) {
  51838. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  51839. int16_t __ret;
  51840. __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0);
  51841. return __ret;
  51842. }
  51843. #endif
  51844. #ifdef __LITTLE_ENDIAN__
  51845. __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
  51846. uint8_t __ret;
  51847. __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0);
  51848. return __ret;
  51849. }
  51850. #else
  51851. __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
  51852. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  51853. uint8_t __ret;
  51854. __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0);
  51855. return __ret;
  51856. }
  51857. #endif
  51858. #ifdef __LITTLE_ENDIAN__
  51859. __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
  51860. uint32_t __ret;
  51861. __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0);
  51862. return __ret;
  51863. }
  51864. #else
  51865. __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
  51866. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51867. uint32_t __ret;
  51868. __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0);
  51869. return __ret;
  51870. }
  51871. #endif
  51872. #ifdef __LITTLE_ENDIAN__
  51873. __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
  51874. uint16_t __ret;
  51875. __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0);
  51876. return __ret;
  51877. }
  51878. #else
  51879. __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
  51880. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  51881. uint16_t __ret;
  51882. __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0);
  51883. return __ret;
  51884. }
  51885. #endif
  51886. #ifdef __LITTLE_ENDIAN__
  51887. __ai int8_t vmaxv_s8(int8x8_t __p0) {
  51888. int8_t __ret;
  51889. __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0);
  51890. return __ret;
  51891. }
  51892. #else
  51893. __ai int8_t vmaxv_s8(int8x8_t __p0) {
  51894. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  51895. int8_t __ret;
  51896. __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0);
  51897. return __ret;
  51898. }
  51899. #endif
  51900. #ifdef __LITTLE_ENDIAN__
  51901. __ai float32_t vmaxv_f32(float32x2_t __p0) {
  51902. float32_t __ret;
  51903. __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0);
  51904. return __ret;
  51905. }
  51906. #else
  51907. __ai float32_t vmaxv_f32(float32x2_t __p0) {
  51908. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51909. float32_t __ret;
  51910. __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0);
  51911. return __ret;
  51912. }
  51913. #endif
  51914. #ifdef __LITTLE_ENDIAN__
  51915. __ai int32_t vmaxv_s32(int32x2_t __p0) {
  51916. int32_t __ret;
  51917. __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0);
  51918. return __ret;
  51919. }
  51920. #else
  51921. __ai int32_t vmaxv_s32(int32x2_t __p0) {
  51922. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51923. int32_t __ret;
  51924. __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0);
  51925. return __ret;
  51926. }
  51927. #endif
  51928. #ifdef __LITTLE_ENDIAN__
  51929. __ai int16_t vmaxv_s16(int16x4_t __p0) {
  51930. int16_t __ret;
  51931. __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0);
  51932. return __ret;
  51933. }
  51934. #else
  51935. __ai int16_t vmaxv_s16(int16x4_t __p0) {
  51936. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  51937. int16_t __ret;
  51938. __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0);
  51939. return __ret;
  51940. }
  51941. #endif
  51942. #ifdef __LITTLE_ENDIAN__
  51943. __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
  51944. float64x2_t __ret;
  51945. __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  51946. return __ret;
  51947. }
  51948. #else
  51949. __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
  51950. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51951. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  51952. float64x2_t __ret;
  51953. __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  51954. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  51955. return __ret;
  51956. }
  51957. #endif
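/* Illustrative usage sketch (editorial addition; the helper name is an
 * assumption). Pairing vminq_f64 with vmaxq_f64 above yields a branch-free
 * range clamp on float64x2_t values:
 *
 *   #include <arm_neon.h>
 *
 *   static inline float64x2_t clamp_f64(float64x2_t v, double lo, double hi)
 *   {
 *       // Each lane becomes min(max(v[i], lo), hi).
 *       v = vmaxq_f64(v, vdupq_n_f64(lo));
 *       return vminq_f64(v, vdupq_n_f64(hi));
 *   }
 */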
  51958. #ifdef __LITTLE_ENDIAN__
  51959. __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
  51960. float64x1_t __ret;
  51961. __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  51962. return __ret;
  51963. }
  51964. #else
  51965. __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
  51966. float64x1_t __ret;
  51967. __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  51968. return __ret;
  51969. }
  51970. #endif
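/* Editorial usage sketch (not part of the original header): vminq_f64 above
 * is an elementwise (per-lane) minimum of two float64x2_t vectors, so it can
 * serve as an upper clamp. Hypothetical helper name; assumes AArch64. */
__ai float64x2_t example_clamp_upper_f64(float64x2_t v, float64x2_t hi) {
  return vminq_f64(v, hi);   /* each lane: min(v[i], hi[i]) */
}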
  51971. #ifdef __LITTLE_ENDIAN__
  51972. __ai float64_t vminnmvq_f64(float64x2_t __p0) {
  51973. float64_t __ret;
  51974. __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0);
  51975. return __ret;
  51976. }
  51977. #else
  51978. __ai float64_t vminnmvq_f64(float64x2_t __p0) {
  51979. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  51980. float64_t __ret;
  51981. __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0);
  51982. return __ret;
  51983. }
  51984. #endif
  51985. #ifdef __LITTLE_ENDIAN__
  51986. __ai float32_t vminnmvq_f32(float32x4_t __p0) {
  51987. float32_t __ret;
  51988. __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0);
  51989. return __ret;
  51990. }
  51991. #else
  51992. __ai float32_t vminnmvq_f32(float32x4_t __p0) {
  51993. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  51994. float32_t __ret;
  51995. __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0);
  51996. return __ret;
  51997. }
  51998. #endif
  51999. #ifdef __LITTLE_ENDIAN__
  52000. __ai float32_t vminnmv_f32(float32x2_t __p0) {
  52001. float32_t __ret;
  52002. __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0);
  52003. return __ret;
  52004. }
  52005. #else
  52006. __ai float32_t vminnmv_f32(float32x2_t __p0) {
  52007. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  52008. float32_t __ret;
  52009. __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0);
  52010. return __ret;
  52011. }
  52012. #endif
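/* Editorial usage sketch (not part of the original header): the vminnmv*
 * reductions above follow the FMINNM-style "number wins over NaN" rule, so a
 * single quiet-NaN lane does not poison the result. Hypothetical helper
 * name; assumes AArch64. */
__ai float32_t example_minnm_f32x4(const float32_t *p) {
  float32x4_t v = vld1q_f32(p);   /* load four floats */
  return vminnmvq_f32(v);         /* smallest numeric lane where one exists */
}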
  52013. #ifdef __LITTLE_ENDIAN__
  52014. __ai uint8_t vminvq_u8(uint8x16_t __p0) {
  52015. uint8_t __ret;
  52016. __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0);
  52017. return __ret;
  52018. }
  52019. #else
  52020. __ai uint8_t vminvq_u8(uint8x16_t __p0) {
  52021. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  52022. uint8_t __ret;
  52023. __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0);
  52024. return __ret;
  52025. }
  52026. #endif
  52027. #ifdef __LITTLE_ENDIAN__
  52028. __ai uint32_t vminvq_u32(uint32x4_t __p0) {
  52029. uint32_t __ret;
  52030. __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0);
  52031. return __ret;
  52032. }
  52033. #else
  52034. __ai uint32_t vminvq_u32(uint32x4_t __p0) {
  52035. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  52036. uint32_t __ret;
  52037. __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0);
  52038. return __ret;
  52039. }
  52040. #endif
  52041. #ifdef __LITTLE_ENDIAN__
  52042. __ai uint16_t vminvq_u16(uint16x8_t __p0) {
  52043. uint16_t __ret;
  52044. __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0);
  52045. return __ret;
  52046. }
  52047. #else
  52048. __ai uint16_t vminvq_u16(uint16x8_t __p0) {
  52049. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  52050. uint16_t __ret;
  52051. __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0);
  52052. return __ret;
  52053. }
  52054. #endif
  52055. #ifdef __LITTLE_ENDIAN__
  52056. __ai int8_t vminvq_s8(int8x16_t __p0) {
  52057. int8_t __ret;
  52058. __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0);
  52059. return __ret;
  52060. }
  52061. #else
  52062. __ai int8_t vminvq_s8(int8x16_t __p0) {
  52063. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  52064. int8_t __ret;
  52065. __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0);
  52066. return __ret;
  52067. }
  52068. #endif
  52069. #ifdef __LITTLE_ENDIAN__
  52070. __ai float64_t vminvq_f64(float64x2_t __p0) {
  52071. float64_t __ret;
  52072. __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0);
  52073. return __ret;
  52074. }
  52075. #else
  52076. __ai float64_t vminvq_f64(float64x2_t __p0) {
  52077. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  52078. float64_t __ret;
  52079. __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0);
  52080. return __ret;
  52081. }
  52082. #endif
  52083. #ifdef __LITTLE_ENDIAN__
  52084. __ai float32_t vminvq_f32(float32x4_t __p0) {
  52085. float32_t __ret;
  52086. __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0);
  52087. return __ret;
  52088. }
  52089. #else
  52090. __ai float32_t vminvq_f32(float32x4_t __p0) {
  52091. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  52092. float32_t __ret;
  52093. __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0);
  52094. return __ret;
  52095. }
  52096. #endif
  52097. #ifdef __LITTLE_ENDIAN__
  52098. __ai int32_t vminvq_s32(int32x4_t __p0) {
  52099. int32_t __ret;
  52100. __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0);
  52101. return __ret;
  52102. }
  52103. #else
  52104. __ai int32_t vminvq_s32(int32x4_t __p0) {
  52105. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  52106. int32_t __ret;
  52107. __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0);
  52108. return __ret;
  52109. }
  52110. #endif
  52111. #ifdef __LITTLE_ENDIAN__
  52112. __ai int16_t vminvq_s16(int16x8_t __p0) {
  52113. int16_t __ret;
  52114. __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0);
  52115. return __ret;
  52116. }
  52117. #else
  52118. __ai int16_t vminvq_s16(int16x8_t __p0) {
  52119. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  52120. int16_t __ret;
  52121. __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0);
  52122. return __ret;
  52123. }
  52124. #endif
  52125. #ifdef __LITTLE_ENDIAN__
  52126. __ai uint8_t vminv_u8(uint8x8_t __p0) {
  52127. uint8_t __ret;
  52128. __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0);
  52129. return __ret;
  52130. }
  52131. #else
  52132. __ai uint8_t vminv_u8(uint8x8_t __p0) {
  52133. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  52134. uint8_t __ret;
  52135. __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0);
  52136. return __ret;
  52137. }
  52138. #endif
  52139. #ifdef __LITTLE_ENDIAN__
  52140. __ai uint32_t vminv_u32(uint32x2_t __p0) {
  52141. uint32_t __ret;
  52142. __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0);
  52143. return __ret;
  52144. }
  52145. #else
  52146. __ai uint32_t vminv_u32(uint32x2_t __p0) {
  52147. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  52148. uint32_t __ret;
  52149. __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0);
  52150. return __ret;
  52151. }
  52152. #endif
  52153. #ifdef __LITTLE_ENDIAN__
  52154. __ai uint16_t vminv_u16(uint16x4_t __p0) {
  52155. uint16_t __ret;
  52156. __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0);
  52157. return __ret;
  52158. }
  52159. #else
  52160. __ai uint16_t vminv_u16(uint16x4_t __p0) {
  52161. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  52162. uint16_t __ret;
  52163. __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0);
  52164. return __ret;
  52165. }
  52166. #endif
  52167. #ifdef __LITTLE_ENDIAN__
  52168. __ai int8_t vminv_s8(int8x8_t __p0) {
  52169. int8_t __ret;
  52170. __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0);
  52171. return __ret;
  52172. }
  52173. #else
  52174. __ai int8_t vminv_s8(int8x8_t __p0) {
  52175. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  52176. int8_t __ret;
  52177. __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0);
  52178. return __ret;
  52179. }
  52180. #endif
  52181. #ifdef __LITTLE_ENDIAN__
  52182. __ai float32_t vminv_f32(float32x2_t __p0) {
  52183. float32_t __ret;
  52184. __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0);
  52185. return __ret;
  52186. }
  52187. #else
  52188. __ai float32_t vminv_f32(float32x2_t __p0) {
  52189. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  52190. float32_t __ret;
  52191. __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0);
  52192. return __ret;
  52193. }
  52194. #endif
  52195. #ifdef __LITTLE_ENDIAN__
  52196. __ai int32_t vminv_s32(int32x2_t __p0) {
  52197. int32_t __ret;
  52198. __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0);
  52199. return __ret;
  52200. }
  52201. #else
  52202. __ai int32_t vminv_s32(int32x2_t __p0) {
  52203. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  52204. int32_t __ret;
  52205. __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0);
  52206. return __ret;
  52207. }
  52208. #endif
  52209. #ifdef __LITTLE_ENDIAN__
  52210. __ai int16_t vminv_s16(int16x4_t __p0) {
  52211. int16_t __ret;
  52212. __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0);
  52213. return __ret;
  52214. }
  52215. #else
  52216. __ai int16_t vminv_s16(int16x4_t __p0) {
  52217. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  52218. int16_t __ret;
  52219. __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0);
  52220. return __ret;
  52221. }
  52222. #endif
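/* Editorial usage sketch (not part of the original header): counterpart of
 * the max example further up, using the 128-bit vminvq_s16 reduction defined
 * above. Hypothetical helper name; assumes AArch64. */
__ai int16_t example_min_s16x8(const int16_t *p) {
  int16x8_t v = vld1q_s16(p);   /* load eight signed halfwords */
  return vminvq_s16(v);         /* horizontal minimum across the eight lanes */
}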
  52223. #ifdef __LITTLE_ENDIAN__
  52224. __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  52225. float64x2_t __ret;
  52226. __ret = __p0 + __p1 * __p2;
  52227. return __ret;
  52228. }
  52229. #else
  52230. __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  52231. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  52232. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  52233. float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  52234. float64x2_t __ret;
  52235. __ret = __rev0 + __rev1 * __rev2;
  52236. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  52237. return __ret;
  52238. }
  52239. #endif
  52240. #ifdef __LITTLE_ENDIAN__
  52241. __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  52242. float64x1_t __ret;
  52243. __ret = __p0 + __p1 * __p2;
  52244. return __ret;
  52245. }
  52246. #else
  52247. __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  52248. float64x1_t __ret;
  52249. __ret = __p0 + __p1 * __p2;
  52250. return __ret;
  52251. }
  52252. #endif
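/* Editorial usage sketch (not part of the original header): vmlaq_f64 above
 * is written as acc + a * b; whether the compiler contracts that into a fused
 * FMLA depends on its FP-contraction setting, so vfmaq_f64 is the intrinsic
 * to reach for when fusion must be guaranteed. Hypothetical helper name. */
__ai float64x2_t example_mla_f64(float64x2_t acc, float64x2_t a, float64x2_t b) {
  return vmlaq_f64(acc, a, b);   /* per lane: acc[i] + a[i] * b[i] */
}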
  52253. #ifdef __LITTLE_ENDIAN__
  52254. #define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52255. uint32x4_t __s0 = __p0; \
  52256. uint32x4_t __s1 = __p1; \
  52257. uint32x4_t __s2 = __p2; \
  52258. uint32x4_t __ret; \
  52259. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52260. __ret; \
  52261. })
  52262. #else
  52263. #define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52264. uint32x4_t __s0 = __p0; \
  52265. uint32x4_t __s1 = __p1; \
  52266. uint32x4_t __s2 = __p2; \
  52267. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52268. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52269. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52270. uint32x4_t __ret; \
  52271. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52272. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52273. __ret; \
  52274. })
  52275. #endif
  52276. #ifdef __LITTLE_ENDIAN__
  52277. #define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52278. uint16x8_t __s0 = __p0; \
  52279. uint16x8_t __s1 = __p1; \
  52280. uint16x8_t __s2 = __p2; \
  52281. uint16x8_t __ret; \
  52282. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  52283. __ret; \
  52284. })
  52285. #else
  52286. #define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52287. uint16x8_t __s0 = __p0; \
  52288. uint16x8_t __s1 = __p1; \
  52289. uint16x8_t __s2 = __p2; \
  52290. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  52291. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  52292. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52293. uint16x8_t __ret; \
  52294. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  52295. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  52296. __ret; \
  52297. })
  52298. #endif
  52299. #ifdef __LITTLE_ENDIAN__
  52300. #define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52301. float32x4_t __s0 = __p0; \
  52302. float32x4_t __s1 = __p1; \
  52303. float32x4_t __s2 = __p2; \
  52304. float32x4_t __ret; \
  52305. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52306. __ret; \
  52307. })
  52308. #else
  52309. #define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52310. float32x4_t __s0 = __p0; \
  52311. float32x4_t __s1 = __p1; \
  52312. float32x4_t __s2 = __p2; \
  52313. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52314. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52315. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52316. float32x4_t __ret; \
  52317. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52318. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52319. __ret; \
  52320. })
  52321. #endif
  52322. #ifdef __LITTLE_ENDIAN__
  52323. #define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52324. int32x4_t __s0 = __p0; \
  52325. int32x4_t __s1 = __p1; \
  52326. int32x4_t __s2 = __p2; \
  52327. int32x4_t __ret; \
  52328. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52329. __ret; \
  52330. })
  52331. #else
  52332. #define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52333. int32x4_t __s0 = __p0; \
  52334. int32x4_t __s1 = __p1; \
  52335. int32x4_t __s2 = __p2; \
  52336. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52337. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52338. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52339. int32x4_t __ret; \
  52340. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52341. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52342. __ret; \
  52343. })
  52344. #endif
  52345. #ifdef __LITTLE_ENDIAN__
  52346. #define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52347. int16x8_t __s0 = __p0; \
  52348. int16x8_t __s1 = __p1; \
  52349. int16x8_t __s2 = __p2; \
  52350. int16x8_t __ret; \
  52351. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  52352. __ret; \
  52353. })
  52354. #else
  52355. #define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52356. int16x8_t __s0 = __p0; \
  52357. int16x8_t __s1 = __p1; \
  52358. int16x8_t __s2 = __p2; \
  52359. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  52360. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  52361. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52362. int16x8_t __ret; \
  52363. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  52364. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  52365. __ret; \
  52366. })
  52367. #endif
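/* Editorial usage sketch (not part of the original header): the
 * vmlaq_laneq_* macros above multiply by a single lane of a 128-bit vector,
 * which is the usual building block for a column-major 4x4 matrix * vector
 * product. Hypothetical helper name; col0..col3 are the matrix columns, x
 * holds the four vector elements, and the lane arguments must be integer
 * constants. */
__ai float32x4_t example_matvec4_f32(float32x4_t col0, float32x4_t col1,
                                     float32x4_t col2, float32x4_t col3,
                                     float32x4_t x) {
  float32x4_t acc = vdupq_n_f32(0.0f);      /* start from a zero accumulator */
  acc = vmlaq_laneq_f32(acc, col0, x, 0);   /* += col0 * x[0] */
  acc = vmlaq_laneq_f32(acc, col1, x, 1);   /* += col1 * x[1] */
  acc = vmlaq_laneq_f32(acc, col2, x, 2);   /* += col2 * x[2] */
  acc = vmlaq_laneq_f32(acc, col3, x, 3);   /* += col3 * x[3] */
  return acc;
}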
  52368. #ifdef __LITTLE_ENDIAN__
  52369. #define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52370. uint32x2_t __s0 = __p0; \
  52371. uint32x2_t __s1 = __p1; \
  52372. uint32x4_t __s2 = __p2; \
  52373. uint32x2_t __ret; \
  52374. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  52375. __ret; \
  52376. })
  52377. #else
  52378. #define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52379. uint32x2_t __s0 = __p0; \
  52380. uint32x2_t __s1 = __p1; \
  52381. uint32x4_t __s2 = __p2; \
  52382. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52383. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  52384. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52385. uint32x2_t __ret; \
  52386. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  52387. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52388. __ret; \
  52389. })
  52390. #endif
  52391. #ifdef __LITTLE_ENDIAN__
  52392. #define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52393. uint16x4_t __s0 = __p0; \
  52394. uint16x4_t __s1 = __p1; \
  52395. uint16x8_t __s2 = __p2; \
  52396. uint16x4_t __ret; \
  52397. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52398. __ret; \
  52399. })
  52400. #else
  52401. #define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52402. uint16x4_t __s0 = __p0; \
  52403. uint16x4_t __s1 = __p1; \
  52404. uint16x8_t __s2 = __p2; \
  52405. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52406. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52407. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52408. uint16x4_t __ret; \
  52409. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52410. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52411. __ret; \
  52412. })
  52413. #endif
  52414. #ifdef __LITTLE_ENDIAN__
  52415. #define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52416. float32x2_t __s0 = __p0; \
  52417. float32x2_t __s1 = __p1; \
  52418. float32x4_t __s2 = __p2; \
  52419. float32x2_t __ret; \
  52420. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  52421. __ret; \
  52422. })
  52423. #else
  52424. #define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52425. float32x2_t __s0 = __p0; \
  52426. float32x2_t __s1 = __p1; \
  52427. float32x4_t __s2 = __p2; \
  52428. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52429. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  52430. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52431. float32x2_t __ret; \
  52432. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  52433. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52434. __ret; \
  52435. })
  52436. #endif
  52437. #ifdef __LITTLE_ENDIAN__
  52438. #define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52439. int32x2_t __s0 = __p0; \
  52440. int32x2_t __s1 = __p1; \
  52441. int32x4_t __s2 = __p2; \
  52442. int32x2_t __ret; \
  52443. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  52444. __ret; \
  52445. })
  52446. #else
  52447. #define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52448. int32x2_t __s0 = __p0; \
  52449. int32x2_t __s1 = __p1; \
  52450. int32x4_t __s2 = __p2; \
  52451. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52452. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  52453. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52454. int32x2_t __ret; \
  52455. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  52456. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52457. __ret; \
  52458. })
  52459. #endif
  52460. #ifdef __LITTLE_ENDIAN__
  52461. #define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52462. int16x4_t __s0 = __p0; \
  52463. int16x4_t __s1 = __p1; \
  52464. int16x8_t __s2 = __p2; \
  52465. int16x4_t __ret; \
  52466. __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52467. __ret; \
  52468. })
  52469. #else
  52470. #define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52471. int16x4_t __s0 = __p0; \
  52472. int16x4_t __s1 = __p1; \
  52473. int16x8_t __s2 = __p2; \
  52474. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52475. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52476. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52477. int16x4_t __ret; \
  52478. __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52479. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52480. __ret; \
  52481. })
  52482. #endif
  52483. #ifdef __LITTLE_ENDIAN__
  52484. __ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
  52485. float64x2_t __ret;
  52486. __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2};
  52487. return __ret;
  52488. }
  52489. #else
  52490. __ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
  52491. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  52492. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  52493. float64x2_t __ret;
  52494. __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2};
  52495. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  52496. return __ret;
  52497. }
  52498. #endif
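/* Editorial usage sketch (not part of the original header): vmlaq_n_f64
 * above broadcasts the scalar operand, giving an axpy-style update.
 * Hypothetical helper name; assumes AArch64. */
__ai float64x2_t example_axpy_f64(float64x2_t acc, float64x2_t x, float64_t a) {
  return vmlaq_n_f64(acc, x, a);   /* per lane: acc[i] + x[i] * a */
}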
  52499. #ifdef __LITTLE_ENDIAN__
  52500. #define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52501. uint64x2_t __s0 = __p0; \
  52502. uint32x4_t __s1 = __p1; \
  52503. uint32x2_t __s2 = __p2; \
  52504. uint64x2_t __ret; \
  52505. __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  52506. __ret; \
  52507. })
  52508. #else
  52509. #define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52510. uint64x2_t __s0 = __p0; \
  52511. uint32x4_t __s1 = __p1; \
  52512. uint32x2_t __s2 = __p2; \
  52513. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52514. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52515. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  52516. uint64x2_t __ret; \
  52517. __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  52518. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52519. __ret; \
  52520. })
  52521. #endif
  52522. #ifdef __LITTLE_ENDIAN__
  52523. #define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52524. uint32x4_t __s0 = __p0; \
  52525. uint16x8_t __s1 = __p1; \
  52526. uint16x4_t __s2 = __p2; \
  52527. uint32x4_t __ret; \
  52528. __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  52529. __ret; \
  52530. })
  52531. #else
  52532. #define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52533. uint32x4_t __s0 = __p0; \
  52534. uint16x8_t __s1 = __p1; \
  52535. uint16x4_t __s2 = __p2; \
  52536. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52537. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  52538. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52539. uint32x4_t __ret; \
  52540. __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  52541. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52542. __ret; \
  52543. })
  52544. #endif
  52545. #ifdef __LITTLE_ENDIAN__
  52546. #define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52547. int64x2_t __s0 = __p0; \
  52548. int32x4_t __s1 = __p1; \
  52549. int32x2_t __s2 = __p2; \
  52550. int64x2_t __ret; \
  52551. __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  52552. __ret; \
  52553. })
  52554. #else
  52555. #define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52556. int64x2_t __s0 = __p0; \
  52557. int32x4_t __s1 = __p1; \
  52558. int32x2_t __s2 = __p2; \
  52559. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52560. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52561. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  52562. int64x2_t __ret; \
  52563. __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  52564. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52565. __ret; \
  52566. })
  52567. #endif
  52568. #ifdef __LITTLE_ENDIAN__
  52569. #define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52570. int32x4_t __s0 = __p0; \
  52571. int16x8_t __s1 = __p1; \
  52572. int16x4_t __s2 = __p2; \
  52573. int32x4_t __ret; \
  52574. __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  52575. __ret; \
  52576. })
  52577. #else
  52578. #define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52579. int32x4_t __s0 = __p0; \
  52580. int16x8_t __s1 = __p1; \
  52581. int16x4_t __s2 = __p2; \
  52582. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52583. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  52584. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52585. int32x4_t __ret; \
  52586. __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  52587. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52588. __ret; \
  52589. })
  52590. #endif
  52591. #ifdef __LITTLE_ENDIAN__
  52592. #define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52593. uint64x2_t __s0 = __p0; \
  52594. uint32x4_t __s1 = __p1; \
  52595. uint32x4_t __s2 = __p2; \
  52596. uint64x2_t __ret; \
  52597. __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  52598. __ret; \
  52599. })
  52600. #else
  52601. #define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52602. uint64x2_t __s0 = __p0; \
  52603. uint32x4_t __s1 = __p1; \
  52604. uint32x4_t __s2 = __p2; \
  52605. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52606. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52607. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52608. uint64x2_t __ret; \
  52609. __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  52610. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52611. __ret; \
  52612. })
  52613. #endif
  52614. #ifdef __LITTLE_ENDIAN__
  52615. #define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52616. uint32x4_t __s0 = __p0; \
  52617. uint16x8_t __s1 = __p1; \
  52618. uint16x8_t __s2 = __p2; \
  52619. uint32x4_t __ret; \
  52620. __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  52621. __ret; \
  52622. })
  52623. #else
  52624. #define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52625. uint32x4_t __s0 = __p0; \
  52626. uint16x8_t __s1 = __p1; \
  52627. uint16x8_t __s2 = __p2; \
  52628. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52629. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  52630. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52631. uint32x4_t __ret; \
  52632. __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  52633. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52634. __ret; \
  52635. })
  52636. #endif
  52637. #ifdef __LITTLE_ENDIAN__
  52638. #define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52639. int64x2_t __s0 = __p0; \
  52640. int32x4_t __s1 = __p1; \
  52641. int32x4_t __s2 = __p2; \
  52642. int64x2_t __ret; \
  52643. __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  52644. __ret; \
  52645. })
  52646. #else
  52647. #define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52648. int64x2_t __s0 = __p0; \
  52649. int32x4_t __s1 = __p1; \
  52650. int32x4_t __s2 = __p2; \
  52651. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52652. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52653. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52654. int64x2_t __ret; \
  52655. __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  52656. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52657. __ret; \
  52658. })
  52659. #endif
  52660. #ifdef __LITTLE_ENDIAN__
  52661. #define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52662. int32x4_t __s0 = __p0; \
  52663. int16x8_t __s1 = __p1; \
  52664. int16x8_t __s2 = __p2; \
  52665. int32x4_t __ret; \
  52666. __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  52667. __ret; \
  52668. })
  52669. #else
  52670. #define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52671. int32x4_t __s0 = __p0; \
  52672. int16x8_t __s1 = __p1; \
  52673. int16x8_t __s2 = __p2; \
  52674. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52675. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  52676. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52677. int32x4_t __ret; \
  52678. __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  52679. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52680. __ret; \
  52681. })
  52682. #endif
  52683. #ifdef __LITTLE_ENDIAN__
  52684. #define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52685. uint64x2_t __s0 = __p0; \
  52686. uint32x2_t __s1 = __p1; \
  52687. uint32x4_t __s2 = __p2; \
  52688. uint64x2_t __ret; \
  52689. __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  52690. __ret; \
  52691. })
  52692. #else
  52693. #define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52694. uint64x2_t __s0 = __p0; \
  52695. uint32x2_t __s1 = __p1; \
  52696. uint32x4_t __s2 = __p2; \
  52697. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52698. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  52699. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52700. uint64x2_t __ret; \
  52701. __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  52702. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52703. __ret; \
  52704. })
  52705. #endif
  52706. #ifdef __LITTLE_ENDIAN__
  52707. #define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52708. uint32x4_t __s0 = __p0; \
  52709. uint16x4_t __s1 = __p1; \
  52710. uint16x8_t __s2 = __p2; \
  52711. uint32x4_t __ret; \
  52712. __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  52713. __ret; \
  52714. })
  52715. #else
  52716. #define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52717. uint32x4_t __s0 = __p0; \
  52718. uint16x4_t __s1 = __p1; \
  52719. uint16x8_t __s2 = __p2; \
  52720. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52721. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52722. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52723. uint32x4_t __ret; \
  52724. __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  52725. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52726. __ret; \
  52727. })
  52728. #endif
  52729. #ifdef __LITTLE_ENDIAN__
  52730. #define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52731. int64x2_t __s0 = __p0; \
  52732. int32x2_t __s1 = __p1; \
  52733. int32x4_t __s2 = __p2; \
  52734. int64x2_t __ret; \
  52735. __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  52736. __ret; \
  52737. })
  52738. #else
  52739. #define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52740. int64x2_t __s0 = __p0; \
  52741. int32x2_t __s1 = __p1; \
  52742. int32x4_t __s2 = __p2; \
  52743. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52744. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  52745. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52746. int64x2_t __ret; \
  52747. __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  52748. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52749. __ret; \
  52750. })
  52751. #endif
  52752. #ifdef __LITTLE_ENDIAN__
  52753. #define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52754. int32x4_t __s0 = __p0; \
  52755. int16x4_t __s1 = __p1; \
  52756. int16x8_t __s2 = __p2; \
  52757. int32x4_t __ret; \
  52758. __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  52759. __ret; \
  52760. })
  52761. #else
  52762. #define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52763. int32x4_t __s0 = __p0; \
  52764. int16x4_t __s1 = __p1; \
  52765. int16x8_t __s2 = __p2; \
  52766. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52767. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52768. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52769. int32x4_t __ret; \
  52770. __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  52771. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52772. __ret; \
  52773. })
  52774. #endif
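/* Editorial usage sketch (not part of the original header): the vmlal_laneq_*
 * macros above perform a widening multiply-accumulate by one lane, e.g. one
 * coefficient tap of a 16-bit filter accumulated in 32 bits without overflow.
 * Hypothetical helper name; the lane index must be an integer constant. */
__ai uint32x4_t example_filter_tap_u16(uint32x4_t acc, uint16x4_t samples,
                                       uint16x8_t coeffs) {
  return vmlal_laneq_u16(acc, samples, coeffs, 3);   /* acc += samples * coeffs[3] */
}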
  52775. #ifdef __LITTLE_ENDIAN__
  52776. __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  52777. float64x2_t __ret;
  52778. __ret = __p0 - __p1 * __p2;
  52779. return __ret;
  52780. }
  52781. #else
  52782. __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
  52783. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  52784. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  52785. float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  52786. float64x2_t __ret;
  52787. __ret = __rev0 - __rev1 * __rev2;
  52788. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  52789. return __ret;
  52790. }
  52791. #endif
  52792. #ifdef __LITTLE_ENDIAN__
  52793. __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  52794. float64x1_t __ret;
  52795. __ret = __p0 - __p1 * __p2;
  52796. return __ret;
  52797. }
  52798. #else
  52799. __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
  52800. float64x1_t __ret;
  52801. __ret = __p0 - __p1 * __p2;
  52802. return __ret;
  52803. }
  52804. #endif
  52805. #ifdef __LITTLE_ENDIAN__
  52806. #define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52807. uint32x4_t __s0 = __p0; \
  52808. uint32x4_t __s1 = __p1; \
  52809. uint32x4_t __s2 = __p2; \
  52810. uint32x4_t __ret; \
  52811. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52812. __ret; \
  52813. })
  52814. #else
  52815. #define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52816. uint32x4_t __s0 = __p0; \
  52817. uint32x4_t __s1 = __p1; \
  52818. uint32x4_t __s2 = __p2; \
  52819. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52820. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52821. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52822. uint32x4_t __ret; \
  52823. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52824. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52825. __ret; \
  52826. })
  52827. #endif
  52828. #ifdef __LITTLE_ENDIAN__
  52829. #define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52830. uint16x8_t __s0 = __p0; \
  52831. uint16x8_t __s1 = __p1; \
  52832. uint16x8_t __s2 = __p2; \
  52833. uint16x8_t __ret; \
  52834. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  52835. __ret; \
  52836. })
  52837. #else
  52838. #define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52839. uint16x8_t __s0 = __p0; \
  52840. uint16x8_t __s1 = __p1; \
  52841. uint16x8_t __s2 = __p2; \
  52842. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  52843. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  52844. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52845. uint16x8_t __ret; \
  52846. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  52847. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  52848. __ret; \
  52849. })
  52850. #endif
  52851. #ifdef __LITTLE_ENDIAN__
  52852. #define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52853. float32x4_t __s0 = __p0; \
  52854. float32x4_t __s1 = __p1; \
  52855. float32x4_t __s2 = __p2; \
  52856. float32x4_t __ret; \
  52857. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52858. __ret; \
  52859. })
  52860. #else
  52861. #define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52862. float32x4_t __s0 = __p0; \
  52863. float32x4_t __s1 = __p1; \
  52864. float32x4_t __s2 = __p2; \
  52865. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52866. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52867. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52868. float32x4_t __ret; \
  52869. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52870. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52871. __ret; \
  52872. })
  52873. #endif
  52874. #ifdef __LITTLE_ENDIAN__
  52875. #define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52876. int32x4_t __s0 = __p0; \
  52877. int32x4_t __s1 = __p1; \
  52878. int32x4_t __s2 = __p2; \
  52879. int32x4_t __ret; \
  52880. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52881. __ret; \
  52882. })
  52883. #else
  52884. #define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52885. int32x4_t __s0 = __p0; \
  52886. int32x4_t __s1 = __p1; \
  52887. int32x4_t __s2 = __p2; \
  52888. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52889. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52890. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52891. int32x4_t __ret; \
  52892. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52893. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52894. __ret; \
  52895. })
  52896. #endif
  52897. #ifdef __LITTLE_ENDIAN__
  52898. #define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52899. int16x8_t __s0 = __p0; \
  52900. int16x8_t __s1 = __p1; \
  52901. int16x8_t __s2 = __p2; \
  52902. int16x8_t __ret; \
  52903. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  52904. __ret; \
  52905. })
  52906. #else
  52907. #define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52908. int16x8_t __s0 = __p0; \
  52909. int16x8_t __s1 = __p1; \
  52910. int16x8_t __s2 = __p2; \
  52911. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  52912. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  52913. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52914. int16x8_t __ret; \
  52915. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
  52916. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  52917. __ret; \
  52918. })
  52919. #endif
  52920. #ifdef __LITTLE_ENDIAN__
  52921. #define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52922. uint32x2_t __s0 = __p0; \
  52923. uint32x2_t __s1 = __p1; \
  52924. uint32x4_t __s2 = __p2; \
  52925. uint32x2_t __ret; \
  52926. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  52927. __ret; \
  52928. })
  52929. #else
  52930. #define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52931. uint32x2_t __s0 = __p0; \
  52932. uint32x2_t __s1 = __p1; \
  52933. uint32x4_t __s2 = __p2; \
  52934. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52935. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  52936. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52937. uint32x2_t __ret; \
  52938. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  52939. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52940. __ret; \
  52941. })
  52942. #endif
  52943. #ifdef __LITTLE_ENDIAN__
  52944. #define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52945. uint16x4_t __s0 = __p0; \
  52946. uint16x4_t __s1 = __p1; \
  52947. uint16x8_t __s2 = __p2; \
  52948. uint16x4_t __ret; \
  52949. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  52950. __ret; \
  52951. })
  52952. #else
  52953. #define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  52954. uint16x4_t __s0 = __p0; \
  52955. uint16x4_t __s1 = __p1; \
  52956. uint16x8_t __s2 = __p2; \
  52957. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  52958. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  52959. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  52960. uint16x4_t __ret; \
  52961. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  52962. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  52963. __ret; \
  52964. })
  52965. #endif
  52966. #ifdef __LITTLE_ENDIAN__
  52967. #define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52968. float32x2_t __s0 = __p0; \
  52969. float32x2_t __s1 = __p1; \
  52970. float32x4_t __s2 = __p2; \
  52971. float32x2_t __ret; \
  52972. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  52973. __ret; \
  52974. })
  52975. #else
  52976. #define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52977. float32x2_t __s0 = __p0; \
  52978. float32x2_t __s1 = __p1; \
  52979. float32x4_t __s2 = __p2; \
  52980. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  52981. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  52982. float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  52983. float32x2_t __ret; \
  52984. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  52985. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  52986. __ret; \
  52987. })
  52988. #endif
  52989. #ifdef __LITTLE_ENDIAN__
  52990. #define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  52991. int32x2_t __s0 = __p0; \
  52992. int32x2_t __s1 = __p1; \
  52993. int32x4_t __s2 = __p2; \
  52994. int32x2_t __ret; \
  52995. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
  52996. __ret; \
  52997. })
  52998. #else
  52999. #define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53000. int32x2_t __s0 = __p0; \
  53001. int32x2_t __s1 = __p1; \
  53002. int32x4_t __s2 = __p2; \
  53003. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53004. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  53005. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  53006. int32x2_t __ret; \
  53007. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
  53008. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53009. __ret; \
  53010. })
  53011. #endif
  53012. #ifdef __LITTLE_ENDIAN__
  53013. #define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53014. int16x4_t __s0 = __p0; \
  53015. int16x4_t __s1 = __p1; \
  53016. int16x8_t __s2 = __p2; \
  53017. int16x4_t __ret; \
  53018. __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
  53019. __ret; \
  53020. })
  53021. #else
  53022. #define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53023. int16x4_t __s0 = __p0; \
  53024. int16x4_t __s1 = __p1; \
  53025. int16x8_t __s2 = __p2; \
  53026. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53027. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53028. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  53029. int16x4_t __ret; \
  53030. __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
  53031. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53032. __ret; \
  53033. })
  53034. #endif
  53035. #ifdef __LITTLE_ENDIAN__
  53036. __ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
  53037. float64x2_t __ret;
  53038. __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2};
  53039. return __ret;
  53040. }
  53041. #else
  53042. __ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
  53043. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  53044. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  53045. float64x2_t __ret;
  53046. __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2};
  53047. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  53048. return __ret;
  53049. }
  53050. #endif
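/* Editorial usage sketch (not part of the original header): vmlsq_n_f64 above
 * is the multiply-subtract mirror of vmlaq_n_f64, useful for removing a
 * scaled component (e.g. one Gram-Schmidt step). Hypothetical helper name. */
__ai float64x2_t example_remove_scaled_f64(float64x2_t acc, float64x2_t x, float64_t a) {
  return vmlsq_n_f64(acc, x, a);   /* per lane: acc[i] - x[i] * a */
}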
  53051. #ifdef __LITTLE_ENDIAN__
  53052. #define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53053. uint64x2_t __s0 = __p0; \
  53054. uint32x4_t __s1 = __p1; \
  53055. uint32x2_t __s2 = __p2; \
  53056. uint64x2_t __ret; \
  53057. __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  53058. __ret; \
  53059. })
  53060. #else
  53061. #define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53062. uint64x2_t __s0 = __p0; \
  53063. uint32x4_t __s1 = __p1; \
  53064. uint32x2_t __s2 = __p2; \
  53065. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53066. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53067. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  53068. uint64x2_t __ret; \
  53069. __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  53070. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53071. __ret; \
  53072. })
  53073. #endif
  53074. #ifdef __LITTLE_ENDIAN__
  53075. #define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53076. uint32x4_t __s0 = __p0; \
  53077. uint16x8_t __s1 = __p1; \
  53078. uint16x4_t __s2 = __p2; \
  53079. uint32x4_t __ret; \
  53080. __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  53081. __ret; \
  53082. })
  53083. #else
  53084. #define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53085. uint32x4_t __s0 = __p0; \
  53086. uint16x8_t __s1 = __p1; \
  53087. uint16x4_t __s2 = __p2; \
  53088. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53089. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  53090. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  53091. uint32x4_t __ret; \
  53092. __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  53093. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53094. __ret; \
  53095. })
  53096. #endif
  53097. #ifdef __LITTLE_ENDIAN__
  53098. #define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53099. int64x2_t __s0 = __p0; \
  53100. int32x4_t __s1 = __p1; \
  53101. int32x2_t __s2 = __p2; \
  53102. int64x2_t __ret; \
  53103. __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  53104. __ret; \
  53105. })
  53106. #else
  53107. #define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53108. int64x2_t __s0 = __p0; \
  53109. int32x4_t __s1 = __p1; \
  53110. int32x2_t __s2 = __p2; \
  53111. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53112. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53113. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  53114. int64x2_t __ret; \
  53115. __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  53116. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53117. __ret; \
  53118. })
  53119. #endif
  53120. #ifdef __LITTLE_ENDIAN__
  53121. #define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53122. int32x4_t __s0 = __p0; \
  53123. int16x8_t __s1 = __p1; \
  53124. int16x4_t __s2 = __p2; \
  53125. int32x4_t __ret; \
  53126. __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  53127. __ret; \
  53128. })
  53129. #else
  53130. #define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53131. int32x4_t __s0 = __p0; \
  53132. int16x8_t __s1 = __p1; \
  53133. int16x4_t __s2 = __p2; \
  53134. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53135. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  53136. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  53137. int32x4_t __ret; \
  53138. __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  53139. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53140. __ret; \
  53141. })
  53142. #endif
  53143. #ifdef __LITTLE_ENDIAN__
  53144. #define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53145. uint64x2_t __s0 = __p0; \
  53146. uint32x4_t __s1 = __p1; \
  53147. uint32x4_t __s2 = __p2; \
  53148. uint64x2_t __ret; \
  53149. __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  53150. __ret; \
  53151. })
  53152. #else
  53153. #define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53154. uint64x2_t __s0 = __p0; \
  53155. uint32x4_t __s1 = __p1; \
  53156. uint32x4_t __s2 = __p2; \
  53157. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53158. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53159. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  53160. uint64x2_t __ret; \
  53161. __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  53162. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53163. __ret; \
  53164. })
  53165. #endif
  53166. #ifdef __LITTLE_ENDIAN__
  53167. #define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53168. uint32x4_t __s0 = __p0; \
  53169. uint16x8_t __s1 = __p1; \
  53170. uint16x8_t __s2 = __p2; \
  53171. uint32x4_t __ret; \
  53172. __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  53173. __ret; \
  53174. })
  53175. #else
  53176. #define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53177. uint32x4_t __s0 = __p0; \
  53178. uint16x8_t __s1 = __p1; \
  53179. uint16x8_t __s2 = __p2; \
  53180. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53181. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  53182. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  53183. uint32x4_t __ret; \
  53184. __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  53185. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53186. __ret; \
  53187. })
  53188. #endif
  53189. #ifdef __LITTLE_ENDIAN__
  53190. #define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53191. int64x2_t __s0 = __p0; \
  53192. int32x4_t __s1 = __p1; \
  53193. int32x4_t __s2 = __p2; \
  53194. int64x2_t __ret; \
  53195. __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  53196. __ret; \
  53197. })
  53198. #else
  53199. #define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53200. int64x2_t __s0 = __p0; \
  53201. int32x4_t __s1 = __p1; \
  53202. int32x4_t __s2 = __p2; \
  53203. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53204. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53205. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  53206. int64x2_t __ret; \
  53207. __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  53208. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53209. __ret; \
  53210. })
  53211. #endif
  53212. #ifdef __LITTLE_ENDIAN__
  53213. #define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53214. int32x4_t __s0 = __p0; \
  53215. int16x8_t __s1 = __p1; \
  53216. int16x8_t __s2 = __p2; \
  53217. int32x4_t __ret; \
  53218. __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  53219. __ret; \
  53220. })
  53221. #else
  53222. #define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53223. int32x4_t __s0 = __p0; \
  53224. int16x8_t __s1 = __p1; \
  53225. int16x8_t __s2 = __p2; \
  53226. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53227. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  53228. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  53229. int32x4_t __ret; \
  53230. __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  53231. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53232. __ret; \
  53233. })
  53234. #endif
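/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). vmlsl_high_laneq_s16 subtracts the widening product of the
   high half of its second operand and one broadcast lane of the third operand
   from the int32x4_t accumulator, i.e. acc - vmull_s16(vget_high_s16(a), dup of b[lane]). */
__ai int32x4_t __example_mlsl_high_laneq_s16(int32x4_t __acc, int16x8_t __a, int16x8_t __b) {
  return vmlsl_high_laneq_s16(__acc, __a, __b, 3); /* lane index must be a constant in [0,7] */
}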
  53235. #ifdef __LITTLE_ENDIAN__
  53236. #define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53237. uint64x2_t __s0 = __p0; \
  53238. uint32x2_t __s1 = __p1; \
  53239. uint32x4_t __s2 = __p2; \
  53240. uint64x2_t __ret; \
  53241. __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  53242. __ret; \
  53243. })
  53244. #else
  53245. #define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53246. uint64x2_t __s0 = __p0; \
  53247. uint32x2_t __s1 = __p1; \
  53248. uint32x4_t __s2 = __p2; \
  53249. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53250. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  53251. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  53252. uint64x2_t __ret; \
  53253. __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  53254. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53255. __ret; \
  53256. })
  53257. #endif
  53258. #ifdef __LITTLE_ENDIAN__
  53259. #define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53260. uint32x4_t __s0 = __p0; \
  53261. uint16x4_t __s1 = __p1; \
  53262. uint16x8_t __s2 = __p2; \
  53263. uint32x4_t __ret; \
  53264. __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  53265. __ret; \
  53266. })
  53267. #else
  53268. #define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53269. uint32x4_t __s0 = __p0; \
  53270. uint16x4_t __s1 = __p1; \
  53271. uint16x8_t __s2 = __p2; \
  53272. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53273. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53274. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  53275. uint32x4_t __ret; \
  53276. __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  53277. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53278. __ret; \
  53279. })
  53280. #endif
  53281. #ifdef __LITTLE_ENDIAN__
  53282. #define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53283. int64x2_t __s0 = __p0; \
  53284. int32x2_t __s1 = __p1; \
  53285. int32x4_t __s2 = __p2; \
  53286. int64x2_t __ret; \
  53287. __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  53288. __ret; \
  53289. })
  53290. #else
  53291. #define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  53292. int64x2_t __s0 = __p0; \
  53293. int32x2_t __s1 = __p1; \
  53294. int32x4_t __s2 = __p2; \
  53295. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53296. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  53297. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  53298. int64x2_t __ret; \
  53299. __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  53300. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53301. __ret; \
  53302. })
  53303. #endif
  53304. #ifdef __LITTLE_ENDIAN__
  53305. #define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53306. int32x4_t __s0 = __p0; \
  53307. int16x4_t __s1 = __p1; \
  53308. int16x8_t __s2 = __p2; \
  53309. int32x4_t __ret; \
  53310. __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  53311. __ret; \
  53312. })
  53313. #else
  53314. #define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  53315. int32x4_t __s0 = __p0; \
  53316. int16x4_t __s1 = __p1; \
  53317. int16x8_t __s2 = __p2; \
  53318. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53319. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53320. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  53321. int32x4_t __ret; \
  53322. __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  53323. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53324. __ret; \
  53325. })
  53326. #endif
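/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). The vmlsl_laneq_* forms take a 64-bit multiplicand vector and
   select the multiplier lane from a 128-bit vector: here acc - vmull_u32(a, dup of b[1]). */
__ai uint64x2_t __example_mlsl_laneq_u32(uint64x2_t __acc, uint32x2_t __a, uint32x4_t __b) {
  return vmlsl_laneq_u32(__acc, __a, __b, 1); /* lane index must be a constant in [0,3] */
}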
  53327. #ifdef __LITTLE_ENDIAN__
  53328. __ai poly64x1_t vmov_n_p64(poly64_t __p0) {
  53329. poly64x1_t __ret;
  53330. __ret = (poly64x1_t) {__p0};
  53331. return __ret;
  53332. }
  53333. #else
  53334. __ai poly64x1_t vmov_n_p64(poly64_t __p0) {
  53335. poly64x1_t __ret;
  53336. __ret = (poly64x1_t) {__p0};
  53337. return __ret;
  53338. }
  53339. #endif
  53340. #ifdef __LITTLE_ENDIAN__
  53341. __ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
  53342. poly64x2_t __ret;
  53343. __ret = (poly64x2_t) {__p0, __p0};
  53344. return __ret;
  53345. }
  53346. #else
  53347. __ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
  53348. poly64x2_t __ret;
  53349. __ret = (poly64x2_t) {__p0, __p0};
  53350. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  53351. return __ret;
  53352. }
  53353. #endif
  53354. #ifdef __LITTLE_ENDIAN__
  53355. __ai float64x2_t vmovq_n_f64(float64_t __p0) {
  53356. float64x2_t __ret;
  53357. __ret = (float64x2_t) {__p0, __p0};
  53358. return __ret;
  53359. }
  53360. #else
  53361. __ai float64x2_t vmovq_n_f64(float64_t __p0) {
  53362. float64x2_t __ret;
  53363. __ret = (float64x2_t) {__p0, __p0};
  53364. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  53365. return __ret;
  53366. }
  53367. #endif
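/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). vmovq_n_f64 broadcasts one float64_t scalar into both lanes of
   a float64x2_t; vmov_n_p64 and vmov_n_f64 build the single-lane 64-bit variants. */
__ai float64x2_t __example_dup_f64(float64_t __x) {
  return vmovq_n_f64(__x); /* yields {__x, __x} */
}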
  53368. #ifdef __LITTLE_ENDIAN__
  53369. __ai float64x1_t vmov_n_f64(float64_t __p0) {
  53370. float64x1_t __ret;
  53371. __ret = (float64x1_t) {__p0};
  53372. return __ret;
  53373. }
  53374. #else
  53375. __ai float64x1_t vmov_n_f64(float64_t __p0) {
  53376. float64x1_t __ret;
  53377. __ret = (float64x1_t) {__p0};
  53378. return __ret;
  53379. }
  53380. #endif
  53381. #ifdef __LITTLE_ENDIAN__
  53382. __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_124) {
  53383. uint16x8_t __ret_124;
  53384. uint8x8_t __a1_124 = vget_high_u8(__p0_124);
  53385. __ret_124 = (uint16x8_t)(vshll_n_u8(__a1_124, 0));
  53386. return __ret_124;
  53387. }
  53388. #else
  53389. __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_125) {
  53390. uint8x16_t __rev0_125; __rev0_125 = __builtin_shufflevector(__p0_125, __p0_125, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  53391. uint16x8_t __ret_125;
  53392. uint8x8_t __a1_125 = __noswap_vget_high_u8(__rev0_125);
  53393. __ret_125 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_125, 0));
  53394. __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 7, 6, 5, 4, 3, 2, 1, 0);
  53395. return __ret_125;
  53396. }
  53397. __ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_126) {
  53398. uint16x8_t __ret_126;
  53399. uint8x8_t __a1_126 = __noswap_vget_high_u8(__p0_126);
  53400. __ret_126 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_126, 0));
  53401. return __ret_126;
  53402. }
  53403. #endif
  53404. #ifdef __LITTLE_ENDIAN__
  53405. __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_127) {
  53406. uint64x2_t __ret_127;
  53407. uint32x2_t __a1_127 = vget_high_u32(__p0_127);
  53408. __ret_127 = (uint64x2_t)(vshll_n_u32(__a1_127, 0));
  53409. return __ret_127;
  53410. }
  53411. #else
  53412. __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_128) {
  53413. uint32x4_t __rev0_128; __rev0_128 = __builtin_shufflevector(__p0_128, __p0_128, 3, 2, 1, 0);
  53414. uint64x2_t __ret_128;
  53415. uint32x2_t __a1_128 = __noswap_vget_high_u32(__rev0_128);
  53416. __ret_128 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_128, 0));
  53417. __ret_128 = __builtin_shufflevector(__ret_128, __ret_128, 1, 0);
  53418. return __ret_128;
  53419. }
  53420. __ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_129) {
  53421. uint64x2_t __ret_129;
  53422. uint32x2_t __a1_129 = __noswap_vget_high_u32(__p0_129);
  53423. __ret_129 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_129, 0));
  53424. return __ret_129;
  53425. }
  53426. #endif
  53427. #ifdef __LITTLE_ENDIAN__
  53428. __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_130) {
  53429. uint32x4_t __ret_130;
  53430. uint16x4_t __a1_130 = vget_high_u16(__p0_130);
  53431. __ret_130 = (uint32x4_t)(vshll_n_u16(__a1_130, 0));
  53432. return __ret_130;
  53433. }
  53434. #else
  53435. __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_131) {
  53436. uint16x8_t __rev0_131; __rev0_131 = __builtin_shufflevector(__p0_131, __p0_131, 7, 6, 5, 4, 3, 2, 1, 0);
  53437. uint32x4_t __ret_131;
  53438. uint16x4_t __a1_131 = __noswap_vget_high_u16(__rev0_131);
  53439. __ret_131 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_131, 0));
  53440. __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 3, 2, 1, 0);
  53441. return __ret_131;
  53442. }
  53443. __ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_132) {
  53444. uint32x4_t __ret_132;
  53445. uint16x4_t __a1_132 = __noswap_vget_high_u16(__p0_132);
  53446. __ret_132 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_132, 0));
  53447. return __ret_132;
  53448. }
  53449. #endif
  53450. #ifdef __LITTLE_ENDIAN__
  53451. __ai int16x8_t vmovl_high_s8(int8x16_t __p0_133) {
  53452. int16x8_t __ret_133;
  53453. int8x8_t __a1_133 = vget_high_s8(__p0_133);
  53454. __ret_133 = (int16x8_t)(vshll_n_s8(__a1_133, 0));
  53455. return __ret_133;
  53456. }
  53457. #else
  53458. __ai int16x8_t vmovl_high_s8(int8x16_t __p0_134) {
  53459. int8x16_t __rev0_134; __rev0_134 = __builtin_shufflevector(__p0_134, __p0_134, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  53460. int16x8_t __ret_134;
  53461. int8x8_t __a1_134 = __noswap_vget_high_s8(__rev0_134);
  53462. __ret_134 = (int16x8_t)(__noswap_vshll_n_s8(__a1_134, 0));
  53463. __ret_134 = __builtin_shufflevector(__ret_134, __ret_134, 7, 6, 5, 4, 3, 2, 1, 0);
  53464. return __ret_134;
  53465. }
  53466. __ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_135) {
  53467. int16x8_t __ret_135;
  53468. int8x8_t __a1_135 = __noswap_vget_high_s8(__p0_135);
  53469. __ret_135 = (int16x8_t)(__noswap_vshll_n_s8(__a1_135, 0));
  53470. return __ret_135;
  53471. }
  53472. #endif
  53473. #ifdef __LITTLE_ENDIAN__
  53474. __ai int64x2_t vmovl_high_s32(int32x4_t __p0_136) {
  53475. int64x2_t __ret_136;
  53476. int32x2_t __a1_136 = vget_high_s32(__p0_136);
  53477. __ret_136 = (int64x2_t)(vshll_n_s32(__a1_136, 0));
  53478. return __ret_136;
  53479. }
  53480. #else
  53481. __ai int64x2_t vmovl_high_s32(int32x4_t __p0_137) {
  53482. int32x4_t __rev0_137; __rev0_137 = __builtin_shufflevector(__p0_137, __p0_137, 3, 2, 1, 0);
  53483. int64x2_t __ret_137;
  53484. int32x2_t __a1_137 = __noswap_vget_high_s32(__rev0_137);
  53485. __ret_137 = (int64x2_t)(__noswap_vshll_n_s32(__a1_137, 0));
  53486. __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 1, 0);
  53487. return __ret_137;
  53488. }
  53489. __ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_138) {
  53490. int64x2_t __ret_138;
  53491. int32x2_t __a1_138 = __noswap_vget_high_s32(__p0_138);
  53492. __ret_138 = (int64x2_t)(__noswap_vshll_n_s32(__a1_138, 0));
  53493. return __ret_138;
  53494. }
  53495. #endif
  53496. #ifdef __LITTLE_ENDIAN__
  53497. __ai int32x4_t vmovl_high_s16(int16x8_t __p0_139) {
  53498. int32x4_t __ret_139;
  53499. int16x4_t __a1_139 = vget_high_s16(__p0_139);
  53500. __ret_139 = (int32x4_t)(vshll_n_s16(__a1_139, 0));
  53501. return __ret_139;
  53502. }
  53503. #else
  53504. __ai int32x4_t vmovl_high_s16(int16x8_t __p0_140) {
  53505. int16x8_t __rev0_140; __rev0_140 = __builtin_shufflevector(__p0_140, __p0_140, 7, 6, 5, 4, 3, 2, 1, 0);
  53506. int32x4_t __ret_140;
  53507. int16x4_t __a1_140 = __noswap_vget_high_s16(__rev0_140);
  53508. __ret_140 = (int32x4_t)(__noswap_vshll_n_s16(__a1_140, 0));
  53509. __ret_140 = __builtin_shufflevector(__ret_140, __ret_140, 3, 2, 1, 0);
  53510. return __ret_140;
  53511. }
  53512. __ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_141) {
  53513. int32x4_t __ret_141;
  53514. int16x4_t __a1_141 = __noswap_vget_high_s16(__p0_141);
  53515. __ret_141 = (int32x4_t)(__noswap_vshll_n_s16(__a1_141, 0));
  53516. return __ret_141;
  53517. }
  53518. #endif
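/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). The vmovl_high_* family widens the upper half of a 128-bit
   vector, e.g. the top eight u8 lanes to u16; it is the high-half counterpart
   of vmovl_u8(vget_high_u8(v)). */
__ai uint16x8_t __example_widen_high_u8(uint8x16_t __v) {
  return vmovl_high_u8(__v);
}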
  53519. #ifdef __LITTLE_ENDIAN__
  53520. __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
  53521. uint16x8_t __ret;
  53522. __ret = vcombine_u16(__p0, vmovn_u32(__p1));
  53523. return __ret;
  53524. }
  53525. #else
  53526. __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
  53527. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  53528. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  53529. uint16x8_t __ret;
  53530. __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
  53531. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  53532. return __ret;
  53533. }
  53534. #endif
  53535. #ifdef __LITTLE_ENDIAN__
  53536. __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
  53537. uint32x4_t __ret;
  53538. __ret = vcombine_u32(__p0, vmovn_u64(__p1));
  53539. return __ret;
  53540. }
  53541. #else
  53542. __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
  53543. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  53544. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  53545. uint32x4_t __ret;
  53546. __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
  53547. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  53548. return __ret;
  53549. }
  53550. #endif
  53551. #ifdef __LITTLE_ENDIAN__
  53552. __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
  53553. uint8x16_t __ret;
  53554. __ret = vcombine_u8(__p0, vmovn_u16(__p1));
  53555. return __ret;
  53556. }
  53557. #else
  53558. __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
  53559. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  53560. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  53561. uint8x16_t __ret;
  53562. __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
  53563. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  53564. return __ret;
  53565. }
  53566. #endif
  53567. #ifdef __LITTLE_ENDIAN__
  53568. __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
  53569. int16x8_t __ret;
  53570. __ret = vcombine_s16(__p0, vmovn_s32(__p1));
  53571. return __ret;
  53572. }
  53573. #else
  53574. __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
  53575. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  53576. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  53577. int16x8_t __ret;
  53578. __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
  53579. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  53580. return __ret;
  53581. }
  53582. #endif
  53583. #ifdef __LITTLE_ENDIAN__
  53584. __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
  53585. int32x4_t __ret;
  53586. __ret = vcombine_s32(__p0, vmovn_s64(__p1));
  53587. return __ret;
  53588. }
  53589. #else
  53590. __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
  53591. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  53592. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  53593. int32x4_t __ret;
  53594. __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
  53595. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  53596. return __ret;
  53597. }
  53598. #endif
  53599. #ifdef __LITTLE_ENDIAN__
  53600. __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
  53601. int8x16_t __ret;
  53602. __ret = vcombine_s8(__p0, vmovn_s16(__p1));
  53603. return __ret;
  53604. }
  53605. #else
  53606. __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
  53607. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  53608. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  53609. int8x16_t __ret;
  53610. __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
  53611. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  53612. return __ret;
  53613. }
  53614. #endif
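/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). vmovn_high_* narrows a 128-bit vector and places the result in
   the upper half of the destination, so two wide vectors can be packed into one
   narrow vector in two steps. */
__ai uint16x8_t __example_pack_u32_pair(uint32x4_t __lo, uint32x4_t __hi) {
  return vmovn_high_u32(vmovn_u32(__lo), __hi); /* lanes 0-3 from __lo, lanes 4-7 from __hi */
}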
  53615. #ifdef __LITTLE_ENDIAN__
  53616. __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
  53617. float64x2_t __ret;
  53618. __ret = __p0 * __p1;
  53619. return __ret;
  53620. }
  53621. #else
  53622. __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
  53623. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  53624. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  53625. float64x2_t __ret;
  53626. __ret = __rev0 * __rev1;
  53627. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  53628. return __ret;
  53629. }
  53630. #endif
  53631. #ifdef __LITTLE_ENDIAN__
  53632. __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
  53633. float64x1_t __ret;
  53634. __ret = __p0 * __p1;
  53635. return __ret;
  53636. }
  53637. #else
  53638. __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
  53639. float64x1_t __ret;
  53640. __ret = __p0 * __p1;
  53641. return __ret;
  53642. }
  53643. #endif
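/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). vmulq_f64 and vmul_f64 are plain lane-wise double-precision
   multiplies, expressed through the C '*' operator on the vector types. */
__ai float64x2_t __example_mul_f64(float64x2_t __a, float64x2_t __b) {
  return vmulq_f64(__a, __b);
}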
  53644. #ifdef __LITTLE_ENDIAN__
  53645. #define vmuld_lane_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \
  53646. float64_t __s0_142 = __p0_142; \
  53647. float64x1_t __s1_142 = __p1_142; \
  53648. float64_t __ret_142; \
  53649. __ret_142 = __s0_142 * vget_lane_f64(__s1_142, __p2_142); \
  53650. __ret_142; \
  53651. })
  53652. #else
  53653. #define vmuld_lane_f64(__p0_143, __p1_143, __p2_143) __extension__ ({ \
  53654. float64_t __s0_143 = __p0_143; \
  53655. float64x1_t __s1_143 = __p1_143; \
  53656. float64_t __ret_143; \
  53657. __ret_143 = __s0_143 * __noswap_vget_lane_f64(__s1_143, __p2_143); \
  53658. __ret_143; \
  53659. })
  53660. #endif
  53661. #ifdef __LITTLE_ENDIAN__
  53662. #define vmuls_lane_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \
  53663. float32_t __s0_144 = __p0_144; \
  53664. float32x2_t __s1_144 = __p1_144; \
  53665. float32_t __ret_144; \
  53666. __ret_144 = __s0_144 * vget_lane_f32(__s1_144, __p2_144); \
  53667. __ret_144; \
  53668. })
  53669. #else
  53670. #define vmuls_lane_f32(__p0_145, __p1_145, __p2_145) __extension__ ({ \
  53671. float32_t __s0_145 = __p0_145; \
  53672. float32x2_t __s1_145 = __p1_145; \
  53673. float32x2_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 1, 0); \
  53674. float32_t __ret_145; \
  53675. __ret_145 = __s0_145 * __noswap_vget_lane_f32(__rev1_145, __p2_145); \
  53676. __ret_145; \
  53677. })
  53678. #endif
  53679. #ifdef __LITTLE_ENDIAN__
  53680. #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  53681. float64x1_t __s0 = __p0; \
  53682. float64x1_t __s1 = __p1; \
  53683. float64x1_t __ret; \
  53684. __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
  53685. __ret; \
  53686. })
  53687. #else
  53688. #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  53689. float64x1_t __s0 = __p0; \
  53690. float64x1_t __s1 = __p1; \
  53691. float64x1_t __ret; \
  53692. __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
  53693. __ret; \
  53694. })
  53695. #endif
  53696. #ifdef __LITTLE_ENDIAN__
  53697. #define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  53698. float64x2_t __s0 = __p0; \
  53699. float64x1_t __s1 = __p1; \
  53700. float64x2_t __ret; \
  53701. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  53702. __ret; \
  53703. })
  53704. #else
  53705. #define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  53706. float64x2_t __s0 = __p0; \
  53707. float64x1_t __s1 = __p1; \
  53708. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53709. float64x2_t __ret; \
  53710. __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  53711. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53712. __ret; \
  53713. })
  53714. #endif
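/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). vmulq_lane_f64 multiplies both lanes of a float64x2_t by one
   lane of a float64x1_t; with a single-lane source the lane index can only be 0. */
__ai float64x2_t __example_mulq_lane_f64(float64x2_t __a, float64x1_t __b) {
  return vmulq_lane_f64(__a, __b, 0);
}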
  53715. #ifdef __LITTLE_ENDIAN__
  53716. #define vmuld_laneq_f64(__p0_146, __p1_146, __p2_146) __extension__ ({ \
  53717. float64_t __s0_146 = __p0_146; \
  53718. float64x2_t __s1_146 = __p1_146; \
  53719. float64_t __ret_146; \
  53720. __ret_146 = __s0_146 * vgetq_lane_f64(__s1_146, __p2_146); \
  53721. __ret_146; \
  53722. })
  53723. #else
  53724. #define vmuld_laneq_f64(__p0_147, __p1_147, __p2_147) __extension__ ({ \
  53725. float64_t __s0_147 = __p0_147; \
  53726. float64x2_t __s1_147 = __p1_147; \
  53727. float64x2_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \
  53728. float64_t __ret_147; \
  53729. __ret_147 = __s0_147 * __noswap_vgetq_lane_f64(__rev1_147, __p2_147); \
  53730. __ret_147; \
  53731. })
  53732. #endif
  53733. #ifdef __LITTLE_ENDIAN__
  53734. #define vmuls_laneq_f32(__p0_148, __p1_148, __p2_148) __extension__ ({ \
  53735. float32_t __s0_148 = __p0_148; \
  53736. float32x4_t __s1_148 = __p1_148; \
  53737. float32_t __ret_148; \
  53738. __ret_148 = __s0_148 * vgetq_lane_f32(__s1_148, __p2_148); \
  53739. __ret_148; \
  53740. })
  53741. #else
  53742. #define vmuls_laneq_f32(__p0_149, __p1_149, __p2_149) __extension__ ({ \
  53743. float32_t __s0_149 = __p0_149; \
  53744. float32x4_t __s1_149 = __p1_149; \
  53745. float32x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
  53746. float32_t __ret_149; \
  53747. __ret_149 = __s0_149 * __noswap_vgetq_lane_f32(__rev1_149, __p2_149); \
  53748. __ret_149; \
  53749. })
  53750. #endif
  53751. #ifdef __LITTLE_ENDIAN__
  53752. #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
  53753. float64x1_t __s0 = __p0; \
  53754. float64x2_t __s1 = __p1; \
  53755. float64x1_t __ret; \
  53756. __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
  53757. __ret; \
  53758. })
  53759. #else
  53760. #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
  53761. float64x1_t __s0 = __p0; \
  53762. float64x2_t __s1 = __p1; \
  53763. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  53764. float64x1_t __ret; \
  53765. __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
  53766. __ret; \
  53767. })
  53768. #endif
  53769. #ifdef __LITTLE_ENDIAN__
  53770. #define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
  53771. uint32x4_t __s0 = __p0; \
  53772. uint32x4_t __s1 = __p1; \
  53773. uint32x4_t __ret; \
  53774. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  53775. __ret; \
  53776. })
  53777. #else
  53778. #define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
  53779. uint32x4_t __s0 = __p0; \
  53780. uint32x4_t __s1 = __p1; \
  53781. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53782. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53783. uint32x4_t __ret; \
  53784. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  53785. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53786. __ret; \
  53787. })
  53788. #endif
  53789. #ifdef __LITTLE_ENDIAN__
  53790. #define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
  53791. uint16x8_t __s0 = __p0; \
  53792. uint16x8_t __s1 = __p1; \
  53793. uint16x8_t __ret; \
  53794. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  53795. __ret; \
  53796. })
  53797. #else
  53798. #define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
  53799. uint16x8_t __s0 = __p0; \
  53800. uint16x8_t __s1 = __p1; \
  53801. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  53802. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  53803. uint16x8_t __ret; \
  53804. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  53805. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  53806. __ret; \
  53807. })
  53808. #endif
  53809. #ifdef __LITTLE_ENDIAN__
  53810. #define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
  53811. float64x2_t __s0 = __p0; \
  53812. float64x2_t __s1 = __p1; \
  53813. float64x2_t __ret; \
  53814. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  53815. __ret; \
  53816. })
  53817. #else
  53818. #define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
  53819. float64x2_t __s0 = __p0; \
  53820. float64x2_t __s1 = __p1; \
  53821. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53822. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  53823. float64x2_t __ret; \
  53824. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  53825. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53826. __ret; \
  53827. })
  53828. #endif
  53829. #ifdef __LITTLE_ENDIAN__
  53830. #define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
  53831. float32x4_t __s0 = __p0; \
  53832. float32x4_t __s1 = __p1; \
  53833. float32x4_t __ret; \
  53834. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  53835. __ret; \
  53836. })
  53837. #else
  53838. #define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
  53839. float32x4_t __s0 = __p0; \
  53840. float32x4_t __s1 = __p1; \
  53841. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53842. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53843. float32x4_t __ret; \
  53844. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  53845. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53846. __ret; \
  53847. })
  53848. #endif
  53849. #ifdef __LITTLE_ENDIAN__
  53850. #define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  53851. int32x4_t __s0 = __p0; \
  53852. int32x4_t __s1 = __p1; \
  53853. int32x4_t __ret; \
  53854. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  53855. __ret; \
  53856. })
  53857. #else
  53858. #define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  53859. int32x4_t __s0 = __p0; \
  53860. int32x4_t __s1 = __p1; \
  53861. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53862. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53863. int32x4_t __ret; \
  53864. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  53865. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53866. __ret; \
  53867. })
  53868. #endif
  53869. #ifdef __LITTLE_ENDIAN__
  53870. #define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  53871. int16x8_t __s0 = __p0; \
  53872. int16x8_t __s1 = __p1; \
  53873. int16x8_t __ret; \
  53874. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  53875. __ret; \
  53876. })
  53877. #else
  53878. #define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  53879. int16x8_t __s0 = __p0; \
  53880. int16x8_t __s1 = __p1; \
  53881. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  53882. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  53883. int16x8_t __ret; \
  53884. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
  53885. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  53886. __ret; \
  53887. })
  53888. #endif
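/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). The vmulq_laneq_* forms broadcast one lane of a 128-bit vector
   and multiply it into every lane of the other operand, a common building block
   for small matrix and filter kernels. */
__ai float32x4_t __example_mulq_laneq_f32(float32x4_t __a, float32x4_t __b) {
  return vmulq_laneq_f32(__a, __b, 2); /* lane index must be a constant in [0,3] */
}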
  53889. #ifdef __LITTLE_ENDIAN__
  53890. #define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
  53891. uint32x2_t __s0 = __p0; \
  53892. uint32x4_t __s1 = __p1; \
  53893. uint32x2_t __ret; \
  53894. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  53895. __ret; \
  53896. })
  53897. #else
  53898. #define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
  53899. uint32x2_t __s0 = __p0; \
  53900. uint32x4_t __s1 = __p1; \
  53901. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53902. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53903. uint32x2_t __ret; \
  53904. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  53905. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53906. __ret; \
  53907. })
  53908. #endif
  53909. #ifdef __LITTLE_ENDIAN__
  53910. #define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
  53911. uint16x4_t __s0 = __p0; \
  53912. uint16x8_t __s1 = __p1; \
  53913. uint16x4_t __ret; \
  53914. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  53915. __ret; \
  53916. })
  53917. #else
  53918. #define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
  53919. uint16x4_t __s0 = __p0; \
  53920. uint16x8_t __s1 = __p1; \
  53921. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53922. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  53923. uint16x4_t __ret; \
  53924. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  53925. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53926. __ret; \
  53927. })
  53928. #endif
  53929. #ifdef __LITTLE_ENDIAN__
  53930. #define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
  53931. float32x2_t __s0 = __p0; \
  53932. float32x4_t __s1 = __p1; \
  53933. float32x2_t __ret; \
  53934. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  53935. __ret; \
  53936. })
  53937. #else
  53938. #define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
  53939. float32x2_t __s0 = __p0; \
  53940. float32x4_t __s1 = __p1; \
  53941. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53942. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53943. float32x2_t __ret; \
  53944. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  53945. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53946. __ret; \
  53947. })
  53948. #endif
  53949. #ifdef __LITTLE_ENDIAN__
  53950. #define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  53951. int32x2_t __s0 = __p0; \
  53952. int32x4_t __s1 = __p1; \
  53953. int32x2_t __ret; \
  53954. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
  53955. __ret; \
  53956. })
  53957. #else
  53958. #define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  53959. int32x2_t __s0 = __p0; \
  53960. int32x4_t __s1 = __p1; \
  53961. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  53962. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  53963. int32x2_t __ret; \
  53964. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
  53965. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  53966. __ret; \
  53967. })
  53968. #endif
  53969. #ifdef __LITTLE_ENDIAN__
  53970. #define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  53971. int16x4_t __s0 = __p0; \
  53972. int16x8_t __s1 = __p1; \
  53973. int16x4_t __ret; \
  53974. __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
  53975. __ret; \
  53976. })
  53977. #else
  53978. #define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  53979. int16x4_t __s0 = __p0; \
  53980. int16x8_t __s1 = __p1; \
  53981. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  53982. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  53983. int16x4_t __ret; \
  53984. __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
  53985. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  53986. __ret; \
  53987. })
  53988. #endif
  53989. #ifdef __LITTLE_ENDIAN__
  53990. __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
  53991. float64x1_t __ret;
  53992. __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
  53993. return __ret;
  53994. }
  53995. #else
  53996. __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
  53997. float64x1_t __ret;
  53998. __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
  53999. return __ret;
  54000. }
  54001. #endif
  54002. #ifdef __LITTLE_ENDIAN__
  54003. __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
  54004. float64x2_t __ret;
  54005. __ret = __p0 * (float64x2_t) {__p1, __p1};
  54006. return __ret;
  54007. }
  54008. #else
  54009. __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
  54010. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54011. float64x2_t __ret;
  54012. __ret = __rev0 * (float64x2_t) {__p1, __p1};
  54013. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54014. return __ret;
  54015. }
  54016. #endif
  54017. #ifdef __LITTLE_ENDIAN__
  54018. __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
  54019. poly128_t __ret;
  54020. __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
  54021. return __ret;
  54022. }
  54023. #else
  54024. __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
  54025. poly128_t __ret;
  54026. __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
  54027. return __ret;
  54028. }
  54029. __ai poly128_t __noswap_vmull_p64(poly64_t __p0, poly64_t __p1) {
  54030. poly128_t __ret;
  54031. __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
  54032. return __ret;
  54033. }
  54034. #endif
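/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). vmull_p64 is the carry-less (polynomial) 64x64 -> 128-bit
   multiply used by GHASH and CRC kernels; on most toolchains the PMULL
   instruction additionally requires the Crypto target feature to be enabled. */
__ai poly128_t __example_clmul(poly64_t __a, poly64_t __b) {
  return vmull_p64(__a, __b);
}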
  54035. #ifdef __LITTLE_ENDIAN__
  54036. __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
  54037. poly16x8_t __ret;
  54038. __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
  54039. return __ret;
  54040. }
  54041. #else
  54042. __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
  54043. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54044. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54045. poly16x8_t __ret;
  54046. __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
  54047. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  54048. return __ret;
  54049. }
  54050. #endif
  54051. #ifdef __LITTLE_ENDIAN__
  54052. __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  54053. uint16x8_t __ret;
  54054. __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
  54055. return __ret;
  54056. }
  54057. #else
  54058. __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  54059. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54060. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54061. uint16x8_t __ret;
  54062. __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
  54063. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  54064. return __ret;
  54065. }
  54066. #endif
  54067. #ifdef __LITTLE_ENDIAN__
  54068. __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  54069. uint64x2_t __ret;
  54070. __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
  54071. return __ret;
  54072. }
  54073. #else
  54074. __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  54075. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  54076. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  54077. uint64x2_t __ret;
  54078. __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
  54079. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54080. return __ret;
  54081. }
  54082. #endif
  54083. #ifdef __LITTLE_ENDIAN__
  54084. __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  54085. uint32x4_t __ret;
  54086. __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
  54087. return __ret;
  54088. }
  54089. #else
  54090. __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  54091. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  54092. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  54093. uint32x4_t __ret;
  54094. __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
  54095. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  54096. return __ret;
  54097. }
  54098. #endif
  54099. #ifdef __LITTLE_ENDIAN__
  54100. __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
  54101. int16x8_t __ret;
  54102. __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
  54103. return __ret;
  54104. }
  54105. #else
  54106. __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
  54107. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54108. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54109. int16x8_t __ret;
  54110. __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
  54111. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  54112. return __ret;
  54113. }
  54114. #endif
  54115. #ifdef __LITTLE_ENDIAN__
  54116. __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
  54117. int64x2_t __ret;
  54118. __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
  54119. return __ret;
  54120. }
  54121. #else
  54122. __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
  54123. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  54124. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  54125. int64x2_t __ret;
  54126. __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
  54127. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54128. return __ret;
  54129. }
  54130. #endif
  54131. #ifdef __LITTLE_ENDIAN__
  54132. __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
  54133. int32x4_t __ret;
  54134. __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
  54135. return __ret;
  54136. }
  54137. #else
  54138. __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
  54139. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  54140. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  54141. int32x4_t __ret;
  54142. __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
  54143. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  54144. return __ret;
  54145. }
  54146. #endif
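/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). vmull_high_* multiplies the upper halves of two 128-bit
   vectors with a widened result; pairing it with vmull_* on the low halves
   covers all lanes without explicit vget_high/vget_low calls. */
__ai int32x4_t __example_mull_high_s16(int16x8_t __a, int16x8_t __b) {
  return vmull_high_s16(__a, __b);
}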
  54147. #ifdef __LITTLE_ENDIAN__
  54148. __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
  54149. poly128_t __ret;
  54150. __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
  54151. return __ret;
  54152. }
  54153. #else
  54154. __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
  54155. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54156. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  54157. poly128_t __ret;
  54158. __ret = __noswap_vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
  54159. return __ret;
  54160. }
  54161. #endif
  54162. #ifdef __LITTLE_ENDIAN__
  54163. #define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  54164. uint32x4_t __s0 = __p0; \
  54165. uint32x2_t __s1 = __p1; \
  54166. uint64x2_t __ret; \
  54167. __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54168. __ret; \
  54169. })
  54170. #else
  54171. #define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
  54172. uint32x4_t __s0 = __p0; \
  54173. uint32x2_t __s1 = __p1; \
  54174. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  54175. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  54176. uint64x2_t __ret; \
  54177. __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54178. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54179. __ret; \
  54180. })
  54181. #endif
  54182. #ifdef __LITTLE_ENDIAN__
  54183. #define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  54184. uint16x8_t __s0 = __p0; \
  54185. uint16x4_t __s1 = __p1; \
  54186. uint32x4_t __ret; \
  54187. __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  54188. __ret; \
  54189. })
  54190. #else
  54191. #define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
  54192. uint16x8_t __s0 = __p0; \
  54193. uint16x4_t __s1 = __p1; \
  54194. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  54195. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  54196. uint32x4_t __ret; \
  54197. __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  54198. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  54199. __ret; \
  54200. })
  54201. #endif
  54202. #ifdef __LITTLE_ENDIAN__
  54203. #define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  54204. int32x4_t __s0 = __p0; \
  54205. int32x2_t __s1 = __p1; \
  54206. int64x2_t __ret; \
  54207. __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54208. __ret; \
  54209. })
  54210. #else
  54211. #define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  54212. int32x4_t __s0 = __p0; \
  54213. int32x2_t __s1 = __p1; \
  54214. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  54215. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  54216. int64x2_t __ret; \
  54217. __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54218. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54219. __ret; \
  54220. })
  54221. #endif
  54222. #ifdef __LITTLE_ENDIAN__
  54223. #define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  54224. int16x8_t __s0 = __p0; \
  54225. int16x4_t __s1 = __p1; \
  54226. int32x4_t __ret; \
  54227. __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  54228. __ret; \
  54229. })
  54230. #else
  54231. #define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  54232. int16x8_t __s0 = __p0; \
  54233. int16x4_t __s1 = __p1; \
  54234. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  54235. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  54236. int32x4_t __ret; \
  54237. __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  54238. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  54239. __ret; \
  54240. })
  54241. #endif
  54242. #ifdef __LITTLE_ENDIAN__
  54243. #define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
  54244. uint32x4_t __s0 = __p0; \
  54245. uint32x4_t __s1 = __p1; \
  54246. uint64x2_t __ret; \
  54247. __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54248. __ret; \
  54249. })
  54250. #else
  54251. #define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
  54252. uint32x4_t __s0 = __p0; \
  54253. uint32x4_t __s1 = __p1; \
  54254. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  54255. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  54256. uint64x2_t __ret; \
  54257. __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54258. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54259. __ret; \
  54260. })
  54261. #endif
  54262. #ifdef __LITTLE_ENDIAN__
  54263. #define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
  54264. uint16x8_t __s0 = __p0; \
  54265. uint16x8_t __s1 = __p1; \
  54266. uint32x4_t __ret; \
  54267. __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  54268. __ret; \
  54269. })
  54270. #else
  54271. #define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
  54272. uint16x8_t __s0 = __p0; \
  54273. uint16x8_t __s1 = __p1; \
  54274. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  54275. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  54276. uint32x4_t __ret; \
  54277. __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  54278. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  54279. __ret; \
  54280. })
  54281. #endif
  54282. #ifdef __LITTLE_ENDIAN__
  54283. #define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  54284. int32x4_t __s0 = __p0; \
  54285. int32x4_t __s1 = __p1; \
  54286. int64x2_t __ret; \
  54287. __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54288. __ret; \
  54289. })
  54290. #else
  54291. #define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  54292. int32x4_t __s0 = __p0; \
  54293. int32x4_t __s1 = __p1; \
  54294. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  54295. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  54296. int64x2_t __ret; \
  54297. __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54298. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54299. __ret; \
  54300. })
  54301. #endif
  54302. #ifdef __LITTLE_ENDIAN__
  54303. #define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  54304. int16x8_t __s0 = __p0; \
  54305. int16x8_t __s1 = __p1; \
  54306. int32x4_t __ret; \
  54307. __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  54308. __ret; \
  54309. })
  54310. #else
  54311. #define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  54312. int16x8_t __s0 = __p0; \
  54313. int16x8_t __s1 = __p1; \
  54314. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  54315. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  54316. int32x4_t __ret; \
  54317. __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  54318. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  54319. __ret; \
  54320. })
  54321. #endif
  54322. #ifdef __LITTLE_ENDIAN__
  54323. __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
  54324. uint64x2_t __ret;
  54325. __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
  54326. return __ret;
  54327. }
  54328. #else
  54329. __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
  54330. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  54331. uint64x2_t __ret;
  54332. __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
  54333. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54334. return __ret;
  54335. }
  54336. #endif
  54337. #ifdef __LITTLE_ENDIAN__
  54338. __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
  54339. uint32x4_t __ret;
  54340. __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
  54341. return __ret;
  54342. }
  54343. #else
  54344. __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
  54345. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  54346. uint32x4_t __ret;
  54347. __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
  54348. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  54349. return __ret;
  54350. }
  54351. #endif
  54352. #ifdef __LITTLE_ENDIAN__
  54353. __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
  54354. int64x2_t __ret;
  54355. __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
  54356. return __ret;
  54357. }
  54358. #else
  54359. __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
  54360. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  54361. int64x2_t __ret;
  54362. __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
  54363. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54364. return __ret;
  54365. }
  54366. #endif
  54367. #ifdef __LITTLE_ENDIAN__
  54368. __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
  54369. int32x4_t __ret;
  54370. __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
  54371. return __ret;
  54372. }
  54373. #else
  54374. __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
  54375. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  54376. int32x4_t __ret;
  54377. __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
  54378. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  54379. return __ret;
  54380. }
  54381. #endif
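/* Illustrative usage sketch (not part of the generated header; helper name is
   hypothetical). vmull_high_n_* multiplies the upper half of a vector by a
   scalar that is broadcast to every lane, again producing a widened result. */
__ai int64x2_t __example_mull_high_n_s32(int32x4_t __a, int32_t __s) {
  return vmull_high_n_s32(__a, __s);
}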
  54382. #ifdef __LITTLE_ENDIAN__
  54383. #define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
  54384. uint32x2_t __s0 = __p0; \
  54385. uint32x4_t __s1 = __p1; \
  54386. uint64x2_t __ret; \
  54387. __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54388. __ret; \
  54389. })
  54390. #else
  54391. #define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
  54392. uint32x2_t __s0 = __p0; \
  54393. uint32x4_t __s1 = __p1; \
  54394. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  54395. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  54396. uint64x2_t __ret; \
  54397. __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54398. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54399. __ret; \
  54400. })
  54401. #endif
  54402. #ifdef __LITTLE_ENDIAN__
  54403. #define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
  54404. uint16x4_t __s0 = __p0; \
  54405. uint16x8_t __s1 = __p1; \
  54406. uint32x4_t __ret; \
  54407. __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  54408. __ret; \
  54409. })
  54410. #else
  54411. #define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
  54412. uint16x4_t __s0 = __p0; \
  54413. uint16x8_t __s1 = __p1; \
  54414. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  54415. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  54416. uint32x4_t __ret; \
  54417. __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  54418. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  54419. __ret; \
  54420. })
  54421. #endif
  54422. #ifdef __LITTLE_ENDIAN__
  54423. #define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  54424. int32x2_t __s0 = __p0; \
  54425. int32x4_t __s1 = __p1; \
  54426. int64x2_t __ret; \
  54427. __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54428. __ret; \
  54429. })
  54430. #else
  54431. #define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  54432. int32x2_t __s0 = __p0; \
  54433. int32x4_t __s1 = __p1; \
  54434. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  54435. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  54436. int64x2_t __ret; \
  54437. __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54438. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54439. __ret; \
  54440. })
  54441. #endif
  54442. #ifdef __LITTLE_ENDIAN__
  54443. #define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  54444. int16x4_t __s0 = __p0; \
  54445. int16x8_t __s1 = __p1; \
  54446. int32x4_t __ret; \
  54447. __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  54448. __ret; \
  54449. })
  54450. #else
  54451. #define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  54452. int16x4_t __s0 = __p0; \
  54453. int16x8_t __s1 = __p1; \
  54454. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  54455. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  54456. int32x4_t __ret; \
  54457. __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  54458. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  54459. __ret; \
  54460. })
  54461. #endif
  54462. #ifdef __LITTLE_ENDIAN__
  54463. __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
  54464. float64x2_t __ret;
  54465. __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  54466. return __ret;
  54467. }
  54468. #else
  54469. __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
  54470. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54471. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  54472. float64x2_t __ret;
  54473. __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  54474. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54475. return __ret;
  54476. }
  54477. __ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
  54478. float64x2_t __ret;
  54479. __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  54480. return __ret;
  54481. }
  54482. #endif
  54483. #ifdef __LITTLE_ENDIAN__
  54484. __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
  54485. float32x4_t __ret;
  54486. __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  54487. return __ret;
  54488. }
  54489. #else
  54490. __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
  54491. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  54492. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  54493. float32x4_t __ret;
  54494. __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  54495. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  54496. return __ret;
  54497. }
  54498. __ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
  54499. float32x4_t __ret;
  54500. __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  54501. return __ret;
  54502. }
  54503. #endif
  54504. #ifdef __LITTLE_ENDIAN__
  54505. __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
  54506. float64x1_t __ret;
  54507. __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  54508. return __ret;
  54509. }
  54510. #else
  54511. __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
  54512. float64x1_t __ret;
  54513. __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  54514. return __ret;
  54515. }
  54516. #endif
  54517. #ifdef __LITTLE_ENDIAN__
  54518. __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
  54519. float32x2_t __ret;
  54520. __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  54521. return __ret;
  54522. }
  54523. #else
  54524. __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
  54525. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54526. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  54527. float32x2_t __ret;
  54528. __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  54529. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54530. return __ret;
  54531. }
  54532. __ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
  54533. float32x2_t __ret;
  54534. __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  54535. return __ret;
  54536. }
  54537. #endif
  54538. #ifdef __LITTLE_ENDIAN__
  54539. __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
  54540. float64_t __ret;
  54541. __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
  54542. return __ret;
  54543. }
  54544. #else
  54545. __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
  54546. float64_t __ret;
  54547. __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
  54548. return __ret;
  54549. }
  54550. __ai float64_t __noswap_vmulxd_f64(float64_t __p0, float64_t __p1) {
  54551. float64_t __ret;
  54552. __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
  54553. return __ret;
  54554. }
  54555. #endif
  54556. #ifdef __LITTLE_ENDIAN__
  54557. __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
  54558. float32_t __ret;
  54559. __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
  54560. return __ret;
  54561. }
  54562. #else
  54563. __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
  54564. float32_t __ret;
  54565. __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
  54566. return __ret;
  54567. }
  54568. __ai float32_t __noswap_vmulxs_f32(float32_t __p0, float32_t __p1) {
  54569. float32_t __ret;
  54570. __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
  54571. return __ret;
  54572. }
  54573. #endif
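/* vmulx*_lane / vmulx*_laneq: multiply extended by a single lane. The scalar
 * forms extract lane __p2 of the vector operand with vget(q)_lane, while the
 * vector forms splat that lane across the width of the first operand via
 * __builtin_shufflevector before calling the corresponding vmulx. */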
  54574. #ifdef __LITTLE_ENDIAN__
  54575. #define vmulxd_lane_f64(__p0_150, __p1_150, __p2_150) __extension__ ({ \
  54576. float64_t __s0_150 = __p0_150; \
  54577. float64x1_t __s1_150 = __p1_150; \
  54578. float64_t __ret_150; \
  54579. __ret_150 = vmulxd_f64(__s0_150, vget_lane_f64(__s1_150, __p2_150)); \
  54580. __ret_150; \
  54581. })
  54582. #else
  54583. #define vmulxd_lane_f64(__p0_151, __p1_151, __p2_151) __extension__ ({ \
  54584. float64_t __s0_151 = __p0_151; \
  54585. float64x1_t __s1_151 = __p1_151; \
  54586. float64_t __ret_151; \
  54587. __ret_151 = __noswap_vmulxd_f64(__s0_151, __noswap_vget_lane_f64(__s1_151, __p2_151)); \
  54588. __ret_151; \
  54589. })
  54590. #endif
  54591. #ifdef __LITTLE_ENDIAN__
  54592. #define vmulxs_lane_f32(__p0_152, __p1_152, __p2_152) __extension__ ({ \
  54593. float32_t __s0_152 = __p0_152; \
  54594. float32x2_t __s1_152 = __p1_152; \
  54595. float32_t __ret_152; \
  54596. __ret_152 = vmulxs_f32(__s0_152, vget_lane_f32(__s1_152, __p2_152)); \
  54597. __ret_152; \
  54598. })
  54599. #else
  54600. #define vmulxs_lane_f32(__p0_153, __p1_153, __p2_153) __extension__ ({ \
  54601. float32_t __s0_153 = __p0_153; \
  54602. float32x2_t __s1_153 = __p1_153; \
  54603. float32x2_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 1, 0); \
  54604. float32_t __ret_153; \
  54605. __ret_153 = __noswap_vmulxs_f32(__s0_153, __noswap_vget_lane_f32(__rev1_153, __p2_153)); \
  54606. __ret_153; \
  54607. })
  54608. #endif
  54609. #ifdef __LITTLE_ENDIAN__
  54610. #define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  54611. float64x2_t __s0 = __p0; \
  54612. float64x1_t __s1 = __p1; \
  54613. float64x2_t __ret; \
  54614. __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54615. __ret; \
  54616. })
  54617. #else
  54618. #define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  54619. float64x2_t __s0 = __p0; \
  54620. float64x1_t __s1 = __p1; \
  54621. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  54622. float64x2_t __ret; \
  54623. __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54624. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54625. __ret; \
  54626. })
  54627. #endif
  54628. #ifdef __LITTLE_ENDIAN__
  54629. #define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  54630. float32x4_t __s0 = __p0; \
  54631. float32x2_t __s1 = __p1; \
  54632. float32x4_t __ret; \
  54633. __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  54634. __ret; \
  54635. })
  54636. #else
  54637. #define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  54638. float32x4_t __s0 = __p0; \
  54639. float32x2_t __s1 = __p1; \
  54640. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  54641. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  54642. float32x4_t __ret; \
  54643. __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  54644. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  54645. __ret; \
  54646. })
  54647. #endif
  54648. #ifdef __LITTLE_ENDIAN__
  54649. #define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  54650. float32x2_t __s0 = __p0; \
  54651. float32x2_t __s1 = __p1; \
  54652. float32x2_t __ret; \
  54653. __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54654. __ret; \
  54655. })
  54656. #else
  54657. #define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
  54658. float32x2_t __s0 = __p0; \
  54659. float32x2_t __s1 = __p1; \
  54660. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  54661. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  54662. float32x2_t __ret; \
  54663. __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54664. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54665. __ret; \
  54666. })
  54667. #endif
  54668. #ifdef __LITTLE_ENDIAN__
  54669. #define vmulxd_laneq_f64(__p0_154, __p1_154, __p2_154) __extension__ ({ \
  54670. float64_t __s0_154 = __p0_154; \
  54671. float64x2_t __s1_154 = __p1_154; \
  54672. float64_t __ret_154; \
  54673. __ret_154 = vmulxd_f64(__s0_154, vgetq_lane_f64(__s1_154, __p2_154)); \
  54674. __ret_154; \
  54675. })
  54676. #else
  54677. #define vmulxd_laneq_f64(__p0_155, __p1_155, __p2_155) __extension__ ({ \
  54678. float64_t __s0_155 = __p0_155; \
  54679. float64x2_t __s1_155 = __p1_155; \
  54680. float64x2_t __rev1_155; __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 1, 0); \
  54681. float64_t __ret_155; \
  54682. __ret_155 = __noswap_vmulxd_f64(__s0_155, __noswap_vgetq_lane_f64(__rev1_155, __p2_155)); \
  54683. __ret_155; \
  54684. })
  54685. #endif
  54686. #ifdef __LITTLE_ENDIAN__
  54687. #define vmulxs_laneq_f32(__p0_156, __p1_156, __p2_156) __extension__ ({ \
  54688. float32_t __s0_156 = __p0_156; \
  54689. float32x4_t __s1_156 = __p1_156; \
  54690. float32_t __ret_156; \
  54691. __ret_156 = vmulxs_f32(__s0_156, vgetq_lane_f32(__s1_156, __p2_156)); \
  54692. __ret_156; \
  54693. })
  54694. #else
  54695. #define vmulxs_laneq_f32(__p0_157, __p1_157, __p2_157) __extension__ ({ \
  54696. float32_t __s0_157 = __p0_157; \
  54697. float32x4_t __s1_157 = __p1_157; \
  54698. float32x4_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 3, 2, 1, 0); \
  54699. float32_t __ret_157; \
  54700. __ret_157 = __noswap_vmulxs_f32(__s0_157, __noswap_vgetq_lane_f32(__rev1_157, __p2_157)); \
  54701. __ret_157; \
  54702. })
  54703. #endif
  54704. #ifdef __LITTLE_ENDIAN__
  54705. #define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
  54706. float64x2_t __s0 = __p0; \
  54707. float64x2_t __s1 = __p1; \
  54708. float64x2_t __ret; \
  54709. __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54710. __ret; \
  54711. })
  54712. #else
  54713. #define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
  54714. float64x2_t __s0 = __p0; \
  54715. float64x2_t __s1 = __p1; \
  54716. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  54717. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  54718. float64x2_t __ret; \
  54719. __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54720. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54721. __ret; \
  54722. })
  54723. #endif
  54724. #ifdef __LITTLE_ENDIAN__
  54725. #define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
  54726. float32x4_t __s0 = __p0; \
  54727. float32x4_t __s1 = __p1; \
  54728. float32x4_t __ret; \
  54729. __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  54730. __ret; \
  54731. })
  54732. #else
  54733. #define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
  54734. float32x4_t __s0 = __p0; \
  54735. float32x4_t __s1 = __p1; \
  54736. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  54737. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  54738. float32x4_t __ret; \
  54739. __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  54740. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  54741. __ret; \
  54742. })
  54743. #endif
  54744. #ifdef __LITTLE_ENDIAN__
  54745. #define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
  54746. float32x2_t __s0 = __p0; \
  54747. float32x4_t __s1 = __p1; \
  54748. float32x2_t __ret; \
  54749. __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  54750. __ret; \
  54751. })
  54752. #else
  54753. #define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
  54754. float32x2_t __s0 = __p0; \
  54755. float32x4_t __s1 = __p1; \
  54756. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  54757. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  54758. float32x2_t __ret; \
  54759. __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  54760. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  54761. __ret; \
  54762. })
  54763. #endif
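/* vneg*: element-wise negation for the 64-bit element types added by AArch64
 * (float64 and int64), plus the scalar vnegd_s64. Negating INT64_MIN wraps in
 * the usual two's-complement way; the saturating form is vqneg*. */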
  54764. #ifdef __LITTLE_ENDIAN__
  54765. __ai float64x2_t vnegq_f64(float64x2_t __p0) {
  54766. float64x2_t __ret;
  54767. __ret = -__p0;
  54768. return __ret;
  54769. }
  54770. #else
  54771. __ai float64x2_t vnegq_f64(float64x2_t __p0) {
  54772. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54773. float64x2_t __ret;
  54774. __ret = -__rev0;
  54775. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54776. return __ret;
  54777. }
  54778. #endif
  54779. #ifdef __LITTLE_ENDIAN__
  54780. __ai int64x2_t vnegq_s64(int64x2_t __p0) {
  54781. int64x2_t __ret;
  54782. __ret = -__p0;
  54783. return __ret;
  54784. }
  54785. #else
  54786. __ai int64x2_t vnegq_s64(int64x2_t __p0) {
  54787. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54788. int64x2_t __ret;
  54789. __ret = -__rev0;
  54790. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54791. return __ret;
  54792. }
  54793. #endif
  54794. #ifdef __LITTLE_ENDIAN__
  54795. __ai float64x1_t vneg_f64(float64x1_t __p0) {
  54796. float64x1_t __ret;
  54797. __ret = -__p0;
  54798. return __ret;
  54799. }
  54800. #else
  54801. __ai float64x1_t vneg_f64(float64x1_t __p0) {
  54802. float64x1_t __ret;
  54803. __ret = -__p0;
  54804. return __ret;
  54805. }
  54806. #endif
  54807. #ifdef __LITTLE_ENDIAN__
  54808. __ai int64x1_t vneg_s64(int64x1_t __p0) {
  54809. int64x1_t __ret;
  54810. __ret = -__p0;
  54811. return __ret;
  54812. }
  54813. #else
  54814. __ai int64x1_t vneg_s64(int64x1_t __p0) {
  54815. int64x1_t __ret;
  54816. __ret = -__p0;
  54817. return __ret;
  54818. }
  54819. #endif
  54820. #ifdef __LITTLE_ENDIAN__
  54821. __ai int64_t vnegd_s64(int64_t __p0) {
  54822. int64_t __ret;
  54823. __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
  54824. return __ret;
  54825. }
  54826. #else
  54827. __ai int64_t vnegd_s64(int64_t __p0) {
  54828. int64_t __ret;
  54829. __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
  54830. return __ret;
  54831. }
  54832. #endif
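/* vpaddq_*: pairwise add (ADDP). Conceptually the two source vectors are
 * concatenated and adjacent pairs are summed:
 *   r[i]     = __p0[2*i] + __p0[2*i+1]   for the low half of the result,
 *   r[i+n/2] = __p1[2*i] + __p1[2*i+1]   for the high half. */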
  54833. #ifdef __LITTLE_ENDIAN__
  54834. __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  54835. uint8x16_t __ret;
  54836. __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  54837. return __ret;
  54838. }
  54839. #else
  54840. __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  54841. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54842. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54843. uint8x16_t __ret;
  54844. __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  54845. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54846. return __ret;
  54847. }
  54848. #endif
  54849. #ifdef __LITTLE_ENDIAN__
  54850. __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  54851. uint32x4_t __ret;
  54852. __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  54853. return __ret;
  54854. }
  54855. #else
  54856. __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  54857. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  54858. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  54859. uint32x4_t __ret;
  54860. __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  54861. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  54862. return __ret;
  54863. }
  54864. #endif
  54865. #ifdef __LITTLE_ENDIAN__
  54866. __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  54867. uint64x2_t __ret;
  54868. __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  54869. return __ret;
  54870. }
  54871. #else
  54872. __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  54873. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54874. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  54875. uint64x2_t __ret;
  54876. __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  54877. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54878. return __ret;
  54879. }
  54880. #endif
  54881. #ifdef __LITTLE_ENDIAN__
  54882. __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  54883. uint16x8_t __ret;
  54884. __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  54885. return __ret;
  54886. }
  54887. #else
  54888. __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  54889. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  54890. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  54891. uint16x8_t __ret;
  54892. __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  54893. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  54894. return __ret;
  54895. }
  54896. #endif
  54897. #ifdef __LITTLE_ENDIAN__
  54898. __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  54899. int8x16_t __ret;
  54900. __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  54901. return __ret;
  54902. }
  54903. #else
  54904. __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  54905. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54906. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54907. int8x16_t __ret;
  54908. __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  54909. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  54910. return __ret;
  54911. }
  54912. #endif
  54913. #ifdef __LITTLE_ENDIAN__
  54914. __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
  54915. float64x2_t __ret;
  54916. __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  54917. return __ret;
  54918. }
  54919. #else
  54920. __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
  54921. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54922. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  54923. float64x2_t __ret;
  54924. __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  54925. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54926. return __ret;
  54927. }
  54928. #endif
  54929. #ifdef __LITTLE_ENDIAN__
  54930. __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
  54931. float32x4_t __ret;
  54932. __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  54933. return __ret;
  54934. }
  54935. #else
  54936. __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
  54937. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  54938. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  54939. float32x4_t __ret;
  54940. __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  54941. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  54942. return __ret;
  54943. }
  54944. #endif
  54945. #ifdef __LITTLE_ENDIAN__
  54946. __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  54947. int32x4_t __ret;
  54948. __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  54949. return __ret;
  54950. }
  54951. #else
  54952. __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  54953. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  54954. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  54955. int32x4_t __ret;
  54956. __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  54957. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  54958. return __ret;
  54959. }
  54960. #endif
  54961. #ifdef __LITTLE_ENDIAN__
  54962. __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
  54963. int64x2_t __ret;
  54964. __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  54965. return __ret;
  54966. }
  54967. #else
  54968. __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
  54969. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  54970. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  54971. int64x2_t __ret;
  54972. __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  54973. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  54974. return __ret;
  54975. }
  54976. #endif
  54977. #ifdef __LITTLE_ENDIAN__
  54978. __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  54979. int16x8_t __ret;
  54980. __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  54981. return __ret;
  54982. }
  54983. #else
  54984. __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  54985. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  54986. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  54987. int16x8_t __ret;
  54988. __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  54989. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  54990. return __ret;
  54991. }
  54992. #endif
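/* vpaddd_u64 / vpaddd_f64 / vpaddd_s64 and vpadds_f32: scalar pairwise-add
 * reductions of a two-element vector, e.g. vpaddd_f64(v) == v[0] + v[1].
 * The lane swap in the big-endian path does not change the sum; it is kept
 * for consistency with the rest of the header. */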
  54993. #ifdef __LITTLE_ENDIAN__
  54994. __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
  54995. uint64_t __ret;
  54996. __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0);
  54997. return __ret;
  54998. }
  54999. #else
  55000. __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
  55001. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55002. uint64_t __ret;
  55003. __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0);
  55004. return __ret;
  55005. }
  55006. #endif
  55007. #ifdef __LITTLE_ENDIAN__
  55008. __ai float64_t vpaddd_f64(float64x2_t __p0) {
  55009. float64_t __ret;
  55010. __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0);
  55011. return __ret;
  55012. }
  55013. #else
  55014. __ai float64_t vpaddd_f64(float64x2_t __p0) {
  55015. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55016. float64_t __ret;
  55017. __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0);
  55018. return __ret;
  55019. }
  55020. #endif
  55021. #ifdef __LITTLE_ENDIAN__
  55022. __ai int64_t vpaddd_s64(int64x2_t __p0) {
  55023. int64_t __ret;
  55024. __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0);
  55025. return __ret;
  55026. }
  55027. #else
  55028. __ai int64_t vpaddd_s64(int64x2_t __p0) {
  55029. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55030. int64_t __ret;
  55031. __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0);
  55032. return __ret;
  55033. }
  55034. #endif
  55035. #ifdef __LITTLE_ENDIAN__
  55036. __ai float32_t vpadds_f32(float32x2_t __p0) {
  55037. float32_t __ret;
  55038. __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0);
  55039. return __ret;
  55040. }
  55041. #else
  55042. __ai float32_t vpadds_f32(float32x2_t __p0) {
  55043. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55044. float32_t __ret;
  55045. __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0);
  55046. return __ret;
  55047. }
  55048. #endif
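/* vpmaxq_*: pairwise maximum (SMAXP/UMAXP/FMAXP). Same pairing scheme as
 * vpaddq_* above, but each pair contributes its larger element; vpmaxqd_f64
 * and vpmaxs_f32 below reduce a two-element vector to a single scalar. */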
  55049. #ifdef __LITTLE_ENDIAN__
  55050. __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  55051. uint8x16_t __ret;
  55052. __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  55053. return __ret;
  55054. }
  55055. #else
  55056. __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  55057. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55058. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55059. uint8x16_t __ret;
  55060. __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  55061. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55062. return __ret;
  55063. }
  55064. #endif
  55065. #ifdef __LITTLE_ENDIAN__
  55066. __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  55067. uint32x4_t __ret;
  55068. __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  55069. return __ret;
  55070. }
  55071. #else
  55072. __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  55073. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55074. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55075. uint32x4_t __ret;
  55076. __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  55077. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55078. return __ret;
  55079. }
  55080. #endif
  55081. #ifdef __LITTLE_ENDIAN__
  55082. __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  55083. uint16x8_t __ret;
  55084. __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  55085. return __ret;
  55086. }
  55087. #else
  55088. __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  55089. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  55090. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  55091. uint16x8_t __ret;
  55092. __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  55093. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  55094. return __ret;
  55095. }
  55096. #endif
  55097. #ifdef __LITTLE_ENDIAN__
  55098. __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
  55099. int8x16_t __ret;
  55100. __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  55101. return __ret;
  55102. }
  55103. #else
  55104. __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
  55105. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55106. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55107. int8x16_t __ret;
  55108. __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  55109. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55110. return __ret;
  55111. }
  55112. #endif
  55113. #ifdef __LITTLE_ENDIAN__
  55114. __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
  55115. float64x2_t __ret;
  55116. __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  55117. return __ret;
  55118. }
  55119. #else
  55120. __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
  55121. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55122. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  55123. float64x2_t __ret;
  55124. __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  55125. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55126. return __ret;
  55127. }
  55128. #endif
  55129. #ifdef __LITTLE_ENDIAN__
  55130. __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
  55131. float32x4_t __ret;
  55132. __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  55133. return __ret;
  55134. }
  55135. #else
  55136. __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
  55137. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55138. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55139. float32x4_t __ret;
  55140. __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  55141. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55142. return __ret;
  55143. }
  55144. #endif
  55145. #ifdef __LITTLE_ENDIAN__
  55146. __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
  55147. int32x4_t __ret;
  55148. __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  55149. return __ret;
  55150. }
  55151. #else
  55152. __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
  55153. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55154. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55155. int32x4_t __ret;
  55156. __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  55157. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55158. return __ret;
  55159. }
  55160. #endif
  55161. #ifdef __LITTLE_ENDIAN__
  55162. __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
  55163. int16x8_t __ret;
  55164. __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  55165. return __ret;
  55166. }
  55167. #else
  55168. __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
  55169. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  55170. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  55171. int16x8_t __ret;
  55172. __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  55173. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  55174. return __ret;
  55175. }
  55176. #endif
  55177. #ifdef __LITTLE_ENDIAN__
  55178. __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
  55179. float64_t __ret;
  55180. __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0);
  55181. return __ret;
  55182. }
  55183. #else
  55184. __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
  55185. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55186. float64_t __ret;
  55187. __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0);
  55188. return __ret;
  55189. }
  55190. #endif
  55191. #ifdef __LITTLE_ENDIAN__
  55192. __ai float32_t vpmaxs_f32(float32x2_t __p0) {
  55193. float32_t __ret;
  55194. __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0);
  55195. return __ret;
  55196. }
  55197. #else
  55198. __ai float32_t vpmaxs_f32(float32x2_t __p0) {
  55199. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55200. float32_t __ret;
  55201. __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0);
  55202. return __ret;
  55203. }
  55204. #endif
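/* vpmaxnm*: pairwise "maximum number" (FMAXNMP), using IEEE 754 maxNum
 * semantics: if exactly one element of a pair is a quiet NaN, the other,
 * numeric element is returned rather than NaN. vpmaxnmqd_f64 and
 * vpmaxnms_f32 reduce a whole two-element vector the same way. */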
  55205. #ifdef __LITTLE_ENDIAN__
  55206. __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
  55207. float64x2_t __ret;
  55208. __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  55209. return __ret;
  55210. }
  55211. #else
  55212. __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
  55213. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55214. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  55215. float64x2_t __ret;
  55216. __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  55217. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55218. return __ret;
  55219. }
  55220. #endif
  55221. #ifdef __LITTLE_ENDIAN__
  55222. __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  55223. float32x4_t __ret;
  55224. __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  55225. return __ret;
  55226. }
  55227. #else
  55228. __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  55229. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55230. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55231. float32x4_t __ret;
  55232. __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  55233. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55234. return __ret;
  55235. }
  55236. #endif
  55237. #ifdef __LITTLE_ENDIAN__
  55238. __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
  55239. float32x2_t __ret;
  55240. __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  55241. return __ret;
  55242. }
  55243. #else
  55244. __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
  55245. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55246. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  55247. float32x2_t __ret;
  55248. __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  55249. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55250. return __ret;
  55251. }
  55252. #endif
  55253. #ifdef __LITTLE_ENDIAN__
  55254. __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
  55255. float64_t __ret;
  55256. __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0);
  55257. return __ret;
  55258. }
  55259. #else
  55260. __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
  55261. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55262. float64_t __ret;
  55263. __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0);
  55264. return __ret;
  55265. }
  55266. #endif
  55267. #ifdef __LITTLE_ENDIAN__
  55268. __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
  55269. float32_t __ret;
  55270. __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0);
  55271. return __ret;
  55272. }
  55273. #else
  55274. __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
  55275. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55276. float32_t __ret;
  55277. __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0);
  55278. return __ret;
  55279. }
  55280. #endif
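/* vpminq_*: pairwise minimum (SMINP/UMINP/FMINP), the mirror image of the
 * vpmaxq_* group above, including the scalar vpminqd_f64/vpmins_f32
 * reductions. */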
  55281. #ifdef __LITTLE_ENDIAN__
  55282. __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  55283. uint8x16_t __ret;
  55284. __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  55285. return __ret;
  55286. }
  55287. #else
  55288. __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  55289. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55290. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55291. uint8x16_t __ret;
  55292. __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  55293. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55294. return __ret;
  55295. }
  55296. #endif
  55297. #ifdef __LITTLE_ENDIAN__
  55298. __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  55299. uint32x4_t __ret;
  55300. __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  55301. return __ret;
  55302. }
  55303. #else
  55304. __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  55305. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55306. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55307. uint32x4_t __ret;
  55308. __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  55309. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55310. return __ret;
  55311. }
  55312. #endif
  55313. #ifdef __LITTLE_ENDIAN__
  55314. __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  55315. uint16x8_t __ret;
  55316. __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  55317. return __ret;
  55318. }
  55319. #else
  55320. __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  55321. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  55322. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  55323. uint16x8_t __ret;
  55324. __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  55325. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  55326. return __ret;
  55327. }
  55328. #endif
  55329. #ifdef __LITTLE_ENDIAN__
  55330. __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
  55331. int8x16_t __ret;
  55332. __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  55333. return __ret;
  55334. }
  55335. #else
  55336. __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
  55337. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55338. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55339. int8x16_t __ret;
  55340. __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  55341. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  55342. return __ret;
  55343. }
  55344. #endif
  55345. #ifdef __LITTLE_ENDIAN__
  55346. __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
  55347. float64x2_t __ret;
  55348. __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  55349. return __ret;
  55350. }
  55351. #else
  55352. __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
  55353. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55354. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  55355. float64x2_t __ret;
  55356. __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  55357. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55358. return __ret;
  55359. }
  55360. #endif
  55361. #ifdef __LITTLE_ENDIAN__
  55362. __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
  55363. float32x4_t __ret;
  55364. __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  55365. return __ret;
  55366. }
  55367. #else
  55368. __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
  55369. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55370. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55371. float32x4_t __ret;
  55372. __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  55373. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55374. return __ret;
  55375. }
  55376. #endif
  55377. #ifdef __LITTLE_ENDIAN__
  55378. __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
  55379. int32x4_t __ret;
  55380. __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  55381. return __ret;
  55382. }
  55383. #else
  55384. __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
  55385. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55386. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55387. int32x4_t __ret;
  55388. __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  55389. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55390. return __ret;
  55391. }
  55392. #endif
  55393. #ifdef __LITTLE_ENDIAN__
  55394. __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
  55395. int16x8_t __ret;
  55396. __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  55397. return __ret;
  55398. }
  55399. #else
  55400. __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
  55401. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  55402. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  55403. int16x8_t __ret;
  55404. __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  55405. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  55406. return __ret;
  55407. }
  55408. #endif
  55409. #ifdef __LITTLE_ENDIAN__
  55410. __ai float64_t vpminqd_f64(float64x2_t __p0) {
  55411. float64_t __ret;
  55412. __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0);
  55413. return __ret;
  55414. }
  55415. #else
  55416. __ai float64_t vpminqd_f64(float64x2_t __p0) {
  55417. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55418. float64_t __ret;
  55419. __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0);
  55420. return __ret;
  55421. }
  55422. #endif
  55423. #ifdef __LITTLE_ENDIAN__
  55424. __ai float32_t vpmins_f32(float32x2_t __p0) {
  55425. float32_t __ret;
  55426. __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0);
  55427. return __ret;
  55428. }
  55429. #else
  55430. __ai float32_t vpmins_f32(float32x2_t __p0) {
  55431. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55432. float32_t __ret;
  55433. __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0);
  55434. return __ret;
  55435. }
  55436. #endif
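/* vpminnm*: pairwise "minimum number" (FMINNMP); NaN handling mirrors the
 * vpmaxnm* group, preferring the numeric element of each pair. */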
  55437. #ifdef __LITTLE_ENDIAN__
  55438. __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
  55439. float64x2_t __ret;
  55440. __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  55441. return __ret;
  55442. }
  55443. #else
  55444. __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
  55445. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55446. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  55447. float64x2_t __ret;
  55448. __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  55449. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55450. return __ret;
  55451. }
  55452. #endif
  55453. #ifdef __LITTLE_ENDIAN__
  55454. __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  55455. float32x4_t __ret;
  55456. __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
  55457. return __ret;
  55458. }
  55459. #else
  55460. __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
  55461. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55462. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55463. float32x4_t __ret;
  55464. __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
  55465. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55466. return __ret;
  55467. }
  55468. #endif
  55469. #ifdef __LITTLE_ENDIAN__
  55470. __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
  55471. float32x2_t __ret;
  55472. __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
  55473. return __ret;
  55474. }
  55475. #else
  55476. __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
  55477. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55478. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  55479. float32x2_t __ret;
  55480. __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
  55481. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55482. return __ret;
  55483. }
  55484. #endif
  55485. #ifdef __LITTLE_ENDIAN__
  55486. __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
  55487. float64_t __ret;
  55488. __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0);
  55489. return __ret;
  55490. }
  55491. #else
  55492. __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
  55493. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55494. float64_t __ret;
  55495. __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0);
  55496. return __ret;
  55497. }
  55498. #endif
  55499. #ifdef __LITTLE_ENDIAN__
  55500. __ai float32_t vpminnms_f32(float32x2_t __p0) {
  55501. float32_t __ret;
  55502. __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0);
  55503. return __ret;
  55504. }
  55505. #else
  55506. __ai float32_t vpminnms_f32(float32x2_t __p0) {
  55507. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55508. float32_t __ret;
  55509. __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0);
  55510. return __ret;
  55511. }
  55512. #endif
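/* vqabs*: saturating absolute value. Unlike plain vabs, the most negative
 * representable value saturates instead of wrapping, e.g.
 * vqabsd_s64(INT64_MIN) == INT64_MAX, and the sticky QC flag in FPSR is set
 * when saturation occurs. */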
  55513. #ifdef __LITTLE_ENDIAN__
  55514. __ai int64x2_t vqabsq_s64(int64x2_t __p0) {
  55515. int64x2_t __ret;
  55516. __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
  55517. return __ret;
  55518. }
  55519. #else
  55520. __ai int64x2_t vqabsq_s64(int64x2_t __p0) {
  55521. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55522. int64x2_t __ret;
  55523. __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
  55524. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55525. return __ret;
  55526. }
  55527. #endif
  55528. #ifdef __LITTLE_ENDIAN__
  55529. __ai int64x1_t vqabs_s64(int64x1_t __p0) {
  55530. int64x1_t __ret;
  55531. __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
  55532. return __ret;
  55533. }
  55534. #else
  55535. __ai int64x1_t vqabs_s64(int64x1_t __p0) {
  55536. int64x1_t __ret;
  55537. __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
  55538. return __ret;
  55539. }
  55540. #endif
  55541. #ifdef __LITTLE_ENDIAN__
  55542. __ai int8_t vqabsb_s8(int8_t __p0) {
  55543. int8_t __ret;
  55544. __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
  55545. return __ret;
  55546. }
  55547. #else
  55548. __ai int8_t vqabsb_s8(int8_t __p0) {
  55549. int8_t __ret;
  55550. __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
  55551. return __ret;
  55552. }
  55553. #endif
  55554. #ifdef __LITTLE_ENDIAN__
  55555. __ai int32_t vqabss_s32(int32_t __p0) {
  55556. int32_t __ret;
  55557. __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
  55558. return __ret;
  55559. }
  55560. #else
  55561. __ai int32_t vqabss_s32(int32_t __p0) {
  55562. int32_t __ret;
  55563. __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
  55564. return __ret;
  55565. }
  55566. #endif
  55567. #ifdef __LITTLE_ENDIAN__
  55568. __ai int64_t vqabsd_s64(int64_t __p0) {
  55569. int64_t __ret;
  55570. __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
  55571. return __ret;
  55572. }
  55573. #else
  55574. __ai int64_t vqabsd_s64(int64_t __p0) {
  55575. int64_t __ret;
  55576. __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
  55577. return __ret;
  55578. }
  55579. #endif
  55580. #ifdef __LITTLE_ENDIAN__
  55581. __ai int16_t vqabsh_s16(int16_t __p0) {
  55582. int16_t __ret;
  55583. __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
  55584. return __ret;
  55585. }
  55586. #else
  55587. __ai int16_t vqabsh_s16(int16_t __p0) {
  55588. int16_t __ret;
  55589. __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
  55590. return __ret;
  55591. }
  55592. #endif
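/* vqaddb/vqaddh/vqadds/vqaddd (u/s variants): scalar saturating add. Results
 * are clamped to the range of the element type instead of wrapping; the
 * __noswap_ copies exist only so the big-endian vector wrappers elsewhere in
 * the header can call them directly (scalars need no lane swapping). */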
  55593. #ifdef __LITTLE_ENDIAN__
  55594. __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
  55595. uint8_t __ret;
  55596. __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
  55597. return __ret;
  55598. }
  55599. #else
  55600. __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
  55601. uint8_t __ret;
  55602. __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
  55603. return __ret;
  55604. }
  55605. #endif
  55606. #ifdef __LITTLE_ENDIAN__
  55607. __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
  55608. uint32_t __ret;
  55609. __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
  55610. return __ret;
  55611. }
  55612. #else
  55613. __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
  55614. uint32_t __ret;
  55615. __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
  55616. return __ret;
  55617. }
  55618. #endif
  55619. #ifdef __LITTLE_ENDIAN__
  55620. __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
  55621. uint64_t __ret;
  55622. __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
  55623. return __ret;
  55624. }
  55625. #else
  55626. __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
  55627. uint64_t __ret;
  55628. __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
  55629. return __ret;
  55630. }
  55631. #endif
  55632. #ifdef __LITTLE_ENDIAN__
  55633. __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
  55634. uint16_t __ret;
  55635. __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
  55636. return __ret;
  55637. }
  55638. #else
  55639. __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
  55640. uint16_t __ret;
  55641. __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
  55642. return __ret;
  55643. }
  55644. #endif
  55645. #ifdef __LITTLE_ENDIAN__
  55646. __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
  55647. int8_t __ret;
  55648. __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
  55649. return __ret;
  55650. }
  55651. #else
  55652. __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
  55653. int8_t __ret;
  55654. __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
  55655. return __ret;
  55656. }
  55657. #endif
  55658. #ifdef __LITTLE_ENDIAN__
  55659. __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
  55660. int32_t __ret;
  55661. __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
  55662. return __ret;
  55663. }
  55664. #else
  55665. __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
  55666. int32_t __ret;
  55667. __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
  55668. return __ret;
  55669. }
  55670. __ai int32_t __noswap_vqadds_s32(int32_t __p0, int32_t __p1) {
  55671. int32_t __ret;
  55672. __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
  55673. return __ret;
  55674. }
  55675. #endif
  55676. #ifdef __LITTLE_ENDIAN__
  55677. __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
  55678. int64_t __ret;
  55679. __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
  55680. return __ret;
  55681. }
  55682. #else
  55683. __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
  55684. int64_t __ret;
  55685. __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
  55686. return __ret;
  55687. }
  55688. #endif
  55689. #ifdef __LITTLE_ENDIAN__
  55690. __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
  55691. int16_t __ret;
  55692. __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
  55693. return __ret;
  55694. }
  55695. #else
  55696. __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
  55697. int16_t __ret;
  55698. __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
  55699. return __ret;
  55700. }
  55701. __ai int16_t __noswap_vqaddh_s16(int16_t __p0, int16_t __p1) {
  55702. int16_t __ret;
  55703. __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
  55704. return __ret;
  55705. }
  55706. #endif
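/* vqdmlal*: signed saturating doubling multiply-accumulate long,
 * acc + saturate(2 * a * b), widened to twice the element size. The scalar
 * forms (vqdmlals_s32, vqdmlalh_s16) operate on single elements, the _high
 * variants feed the upper halves of their 128-bit sources through vqdmlal,
 * and the _lane/_laneq forms splat one lane of the final operand first. */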
  55707. #ifdef __LITTLE_ENDIAN__
  55708. __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
  55709. int64_t __ret;
  55710. __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
  55711. return __ret;
  55712. }
  55713. #else
  55714. __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
  55715. int64_t __ret;
  55716. __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
  55717. return __ret;
  55718. }
  55719. #endif
  55720. #ifdef __LITTLE_ENDIAN__
  55721. __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
  55722. int32_t __ret;
  55723. __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
  55724. return __ret;
  55725. }
  55726. #else
  55727. __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
  55728. int32_t __ret;
  55729. __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
  55730. return __ret;
  55731. }
  55732. #endif
  55733. #ifdef __LITTLE_ENDIAN__
  55734. __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  55735. int64x2_t __ret;
  55736. __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
  55737. return __ret;
  55738. }
  55739. #else
  55740. __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  55741. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55742. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55743. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  55744. int64x2_t __ret;
  55745. __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
  55746. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55747. return __ret;
  55748. }
  55749. #endif
  55750. #ifdef __LITTLE_ENDIAN__
  55751. __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  55752. int32x4_t __ret;
  55753. __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
  55754. return __ret;
  55755. }
  55756. #else
  55757. __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  55758. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55759. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  55760. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  55761. int32x4_t __ret;
  55762. __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
  55763. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55764. return __ret;
  55765. }
  55766. #endif
  55767. #ifdef __LITTLE_ENDIAN__
  55768. #define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55769. int64x2_t __s0 = __p0; \
  55770. int32x4_t __s1 = __p1; \
  55771. int32x2_t __s2 = __p2; \
  55772. int64x2_t __ret; \
  55773. __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  55774. __ret; \
  55775. })
  55776. #else
  55777. #define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55778. int64x2_t __s0 = __p0; \
  55779. int32x4_t __s1 = __p1; \
  55780. int32x2_t __s2 = __p2; \
  55781. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  55782. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  55783. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  55784. int64x2_t __ret; \
  55785. __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  55786. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  55787. __ret; \
  55788. })
  55789. #endif
  55790. #ifdef __LITTLE_ENDIAN__
  55791. #define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55792. int32x4_t __s0 = __p0; \
  55793. int16x8_t __s1 = __p1; \
  55794. int16x4_t __s2 = __p2; \
  55795. int32x4_t __ret; \
  55796. __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  55797. __ret; \
  55798. })
  55799. #else
  55800. #define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55801. int32x4_t __s0 = __p0; \
  55802. int16x8_t __s1 = __p1; \
  55803. int16x4_t __s2 = __p2; \
  55804. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  55805. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  55806. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  55807. int32x4_t __ret; \
  55808. __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  55809. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  55810. __ret; \
  55811. })
  55812. #endif
  55813. #ifdef __LITTLE_ENDIAN__
  55814. #define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55815. int64x2_t __s0 = __p0; \
  55816. int32x4_t __s1 = __p1; \
  55817. int32x4_t __s2 = __p2; \
  55818. int64x2_t __ret; \
  55819. __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  55820. __ret; \
  55821. })
  55822. #else
  55823. #define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55824. int64x2_t __s0 = __p0; \
  55825. int32x4_t __s1 = __p1; \
  55826. int32x4_t __s2 = __p2; \
  55827. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  55828. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  55829. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  55830. int64x2_t __ret; \
  55831. __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  55832. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  55833. __ret; \
  55834. })
  55835. #endif
  55836. #ifdef __LITTLE_ENDIAN__
  55837. #define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55838. int32x4_t __s0 = __p0; \
  55839. int16x8_t __s1 = __p1; \
  55840. int16x8_t __s2 = __p2; \
  55841. int32x4_t __ret; \
  55842. __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  55843. __ret; \
  55844. })
  55845. #else
  55846. #define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55847. int32x4_t __s0 = __p0; \
  55848. int16x8_t __s1 = __p1; \
  55849. int16x8_t __s2 = __p2; \
  55850. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  55851. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  55852. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  55853. int32x4_t __ret; \
  55854. __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  55855. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  55856. __ret; \
  55857. })
  55858. #endif
  55859. #ifdef __LITTLE_ENDIAN__
  55860. __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  55861. int64x2_t __ret;
  55862. __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
  55863. return __ret;
  55864. }
  55865. #else
  55866. __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  55867. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  55868. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  55869. int64x2_t __ret;
  55870. __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
  55871. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  55872. return __ret;
  55873. }
  55874. #endif
  55875. #ifdef __LITTLE_ENDIAN__
  55876. __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  55877. int32x4_t __ret;
  55878. __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
  55879. return __ret;
  55880. }
  55881. #else
  55882. __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  55883. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  55884. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  55885. int32x4_t __ret;
  55886. __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
  55887. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  55888. return __ret;
  55889. }
  55890. #endif
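/*
 * Illustrative usage sketch, not part of the generated header: the _n forms
 * above multiply every selected lane by a single scalar, so feeding a
 * broadcast vector to the non-_n intrinsic is expected to give the same
 * result. The helper name is hypothetical.
 */
__ai int32x4_t __example_vqdmlal_high_n_equiv(int32x4_t __acc, int16x8_t __a, int16_t __s) {
  /* Same effect as vqdmlal_high_n_s16(__acc, __a, __s): broadcast, then accumulate. */
  return vqdmlal_high_s16(__acc, __a, vdupq_n_s16(__s));
}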
  55891. #ifdef __LITTLE_ENDIAN__
  55892. #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55893. int64_t __s0 = __p0; \
  55894. int32_t __s1 = __p1; \
  55895. int32x2_t __s2 = __p2; \
  55896. int64_t __ret; \
  55897. __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
  55898. __ret; \
  55899. })
  55900. #else
  55901. #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55902. int64_t __s0 = __p0; \
  55903. int32_t __s1 = __p1; \
  55904. int32x2_t __s2 = __p2; \
  55905. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  55906. int64_t __ret; \
  55907. __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
  55908. __ret; \
  55909. })
  55910. #endif
  55911. #ifdef __LITTLE_ENDIAN__
  55912. #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55913. int32_t __s0 = __p0; \
  55914. int16_t __s1 = __p1; \
  55915. int16x4_t __s2 = __p2; \
  55916. int32_t __ret; \
  55917. __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
  55918. __ret; \
  55919. })
  55920. #else
  55921. #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55922. int32_t __s0 = __p0; \
  55923. int16_t __s1 = __p1; \
  55924. int16x4_t __s2 = __p2; \
  55925. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  55926. int32_t __ret; \
  55927. __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
  55928. __ret; \
  55929. })
  55930. #endif
  55931. #ifdef __LITTLE_ENDIAN__
  55932. #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55933. int64_t __s0 = __p0; \
  55934. int32_t __s1 = __p1; \
  55935. int32x4_t __s2 = __p2; \
  55936. int64_t __ret; \
  55937. __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
  55938. __ret; \
  55939. })
  55940. #else
  55941. #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55942. int64_t __s0 = __p0; \
  55943. int32_t __s1 = __p1; \
  55944. int32x4_t __s2 = __p2; \
  55945. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  55946. int64_t __ret; \
  55947. __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
  55948. __ret; \
  55949. })
  55950. #endif
  55951. #ifdef __LITTLE_ENDIAN__
  55952. #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55953. int32_t __s0 = __p0; \
  55954. int16_t __s1 = __p1; \
  55955. int16x8_t __s2 = __p2; \
  55956. int32_t __ret; \
  55957. __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
  55958. __ret; \
  55959. })
  55960. #else
  55961. #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55962. int32_t __s0 = __p0; \
  55963. int16_t __s1 = __p1; \
  55964. int16x8_t __s2 = __p2; \
  55965. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  55966. int32_t __ret; \
  55967. __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
  55968. __ret; \
  55969. })
  55970. #endif
  55971. #ifdef __LITTLE_ENDIAN__
  55972. #define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55973. int64x2_t __s0 = __p0; \
  55974. int32x2_t __s1 = __p1; \
  55975. int32x4_t __s2 = __p2; \
  55976. int64x2_t __ret; \
  55977. __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  55978. __ret; \
  55979. })
  55980. #else
  55981. #define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  55982. int64x2_t __s0 = __p0; \
  55983. int32x2_t __s1 = __p1; \
  55984. int32x4_t __s2 = __p2; \
  55985. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  55986. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  55987. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  55988. int64x2_t __ret; \
  55989. __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  55990. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  55991. __ret; \
  55992. })
  55993. #endif
  55994. #ifdef __LITTLE_ENDIAN__
  55995. #define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  55996. int32x4_t __s0 = __p0; \
  55997. int16x4_t __s1 = __p1; \
  55998. int16x8_t __s2 = __p2; \
  55999. int32x4_t __ret; \
  56000. __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  56001. __ret; \
  56002. })
  56003. #else
  56004. #define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56005. int32x4_t __s0 = __p0; \
  56006. int16x4_t __s1 = __p1; \
  56007. int16x8_t __s2 = __p2; \
  56008. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56009. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56010. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  56011. int32x4_t __ret; \
  56012. __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  56013. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56014. __ret; \
  56015. })
  56016. #endif
  56017. #ifdef __LITTLE_ENDIAN__
  56018. __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
  56019. int64_t __ret;
  56020. __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
  56021. return __ret;
  56022. }
  56023. #else
  56024. __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
  56025. int64_t __ret;
  56026. __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
  56027. return __ret;
  56028. }
  56029. #endif
  56030. #ifdef __LITTLE_ENDIAN__
  56031. __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
  56032. int32_t __ret;
  56033. __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
  56034. return __ret;
  56035. }
  56036. #else
  56037. __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
  56038. int32_t __ret;
  56039. __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
  56040. return __ret;
  56041. }
  56042. #endif
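/*
 * Illustrative usage sketch, not part of the generated header: the scalar
 * vqdmlsl*_s32/_s16 forms above compute a saturating "acc - sat(2 * a * b)"
 * in the wider type, mirroring the vector vqdmlsl_* intrinsics one lane at a
 * time. The helper name and values are only an example.
 */
__ai int64_t __example_vqdmlsls(void) {
  int64_t __acc = 100;
  int32_t __a = 3, __b = 7;
  /* 100 - 2*3*7 = 58, with saturation applied at each step. */
  return vqdmlsls_s32(__acc, __a, __b);
}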
  56043. #ifdef __LITTLE_ENDIAN__
  56044. __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  56045. int64x2_t __ret;
  56046. __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
  56047. return __ret;
  56048. }
  56049. #else
  56050. __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  56051. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  56052. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  56053. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  56054. int64x2_t __ret;
  56055. __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
  56056. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  56057. return __ret;
  56058. }
  56059. #endif
  56060. #ifdef __LITTLE_ENDIAN__
  56061. __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  56062. int32x4_t __ret;
  56063. __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
  56064. return __ret;
  56065. }
  56066. #else
  56067. __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  56068. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  56069. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  56070. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  56071. int32x4_t __ret;
  56072. __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
  56073. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  56074. return __ret;
  56075. }
  56076. #endif
  56077. #ifdef __LITTLE_ENDIAN__
  56078. #define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56079. int64x2_t __s0 = __p0; \
  56080. int32x4_t __s1 = __p1; \
  56081. int32x2_t __s2 = __p2; \
  56082. int64x2_t __ret; \
  56083. __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  56084. __ret; \
  56085. })
  56086. #else
  56087. #define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56088. int64x2_t __s0 = __p0; \
  56089. int32x4_t __s1 = __p1; \
  56090. int32x2_t __s2 = __p2; \
  56091. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  56092. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56093. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  56094. int64x2_t __ret; \
  56095. __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  56096. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  56097. __ret; \
  56098. })
  56099. #endif
  56100. #ifdef __LITTLE_ENDIAN__
  56101. #define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56102. int32x4_t __s0 = __p0; \
  56103. int16x8_t __s1 = __p1; \
  56104. int16x4_t __s2 = __p2; \
  56105. int32x4_t __ret; \
  56106. __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  56107. __ret; \
  56108. })
  56109. #else
  56110. #define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56111. int32x4_t __s0 = __p0; \
  56112. int16x8_t __s1 = __p1; \
  56113. int16x4_t __s2 = __p2; \
  56114. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56115. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  56116. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  56117. int32x4_t __ret; \
  56118. __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  56119. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56120. __ret; \
  56121. })
  56122. #endif
  56123. #ifdef __LITTLE_ENDIAN__
  56124. #define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56125. int64x2_t __s0 = __p0; \
  56126. int32x4_t __s1 = __p1; \
  56127. int32x4_t __s2 = __p2; \
  56128. int64x2_t __ret; \
  56129. __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  56130. __ret; \
  56131. })
  56132. #else
  56133. #define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56134. int64x2_t __s0 = __p0; \
  56135. int32x4_t __s1 = __p1; \
  56136. int32x4_t __s2 = __p2; \
  56137. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  56138. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56139. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  56140. int64x2_t __ret; \
  56141. __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  56142. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  56143. __ret; \
  56144. })
  56145. #endif
  56146. #ifdef __LITTLE_ENDIAN__
  56147. #define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56148. int32x4_t __s0 = __p0; \
  56149. int16x8_t __s1 = __p1; \
  56150. int16x8_t __s2 = __p2; \
  56151. int32x4_t __ret; \
  56152. __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  56153. __ret; \
  56154. })
  56155. #else
  56156. #define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56157. int32x4_t __s0 = __p0; \
  56158. int16x8_t __s1 = __p1; \
  56159. int16x8_t __s2 = __p2; \
  56160. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56161. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  56162. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  56163. int32x4_t __ret; \
  56164. __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  56165. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56166. __ret; \
  56167. })
  56168. #endif
  56169. #ifdef __LITTLE_ENDIAN__
  56170. __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  56171. int64x2_t __ret;
  56172. __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
  56173. return __ret;
  56174. }
  56175. #else
  56176. __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  56177. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  56178. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  56179. int64x2_t __ret;
  56180. __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
  56181. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  56182. return __ret;
  56183. }
  56184. #endif
  56185. #ifdef __LITTLE_ENDIAN__
  56186. __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  56187. int32x4_t __ret;
  56188. __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
  56189. return __ret;
  56190. }
  56191. #else
  56192. __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  56193. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  56194. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  56195. int32x4_t __ret;
  56196. __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
  56197. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  56198. return __ret;
  56199. }
  56200. #endif
  56201. #ifdef __LITTLE_ENDIAN__
  56202. #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56203. int64_t __s0 = __p0; \
  56204. int32_t __s1 = __p1; \
  56205. int32x2_t __s2 = __p2; \
  56206. int64_t __ret; \
  56207. __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
  56208. __ret; \
  56209. })
  56210. #else
  56211. #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56212. int64_t __s0 = __p0; \
  56213. int32_t __s1 = __p1; \
  56214. int32x2_t __s2 = __p2; \
  56215. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  56216. int64_t __ret; \
  56217. __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
  56218. __ret; \
  56219. })
  56220. #endif
  56221. #ifdef __LITTLE_ENDIAN__
  56222. #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56223. int32_t __s0 = __p0; \
  56224. int16_t __s1 = __p1; \
  56225. int16x4_t __s2 = __p2; \
  56226. int32_t __ret; \
  56227. __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
  56228. __ret; \
  56229. })
  56230. #else
  56231. #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56232. int32_t __s0 = __p0; \
  56233. int16_t __s1 = __p1; \
  56234. int16x4_t __s2 = __p2; \
  56235. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  56236. int32_t __ret; \
  56237. __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
  56238. __ret; \
  56239. })
  56240. #endif
  56241. #ifdef __LITTLE_ENDIAN__
  56242. #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56243. int64_t __s0 = __p0; \
  56244. int32_t __s1 = __p1; \
  56245. int32x4_t __s2 = __p2; \
  56246. int64_t __ret; \
  56247. __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
  56248. __ret; \
  56249. })
  56250. #else
  56251. #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56252. int64_t __s0 = __p0; \
  56253. int32_t __s1 = __p1; \
  56254. int32x4_t __s2 = __p2; \
  56255. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  56256. int64_t __ret; \
  56257. __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
  56258. __ret; \
  56259. })
  56260. #endif
  56261. #ifdef __LITTLE_ENDIAN__
  56262. #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56263. int32_t __s0 = __p0; \
  56264. int16_t __s1 = __p1; \
  56265. int16x8_t __s2 = __p2; \
  56266. int32_t __ret; \
  56267. __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
  56268. __ret; \
  56269. })
  56270. #else
  56271. #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56272. int32_t __s0 = __p0; \
  56273. int16_t __s1 = __p1; \
  56274. int16x8_t __s2 = __p2; \
  56275. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  56276. int32_t __ret; \
  56277. __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
  56278. __ret; \
  56279. })
  56280. #endif
  56281. #ifdef __LITTLE_ENDIAN__
  56282. #define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56283. int64x2_t __s0 = __p0; \
  56284. int32x2_t __s1 = __p1; \
  56285. int32x4_t __s2 = __p2; \
  56286. int64x2_t __ret; \
  56287. __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  56288. __ret; \
  56289. })
  56290. #else
  56291. #define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  56292. int64x2_t __s0 = __p0; \
  56293. int32x2_t __s1 = __p1; \
  56294. int32x4_t __s2 = __p2; \
  56295. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  56296. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  56297. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  56298. int64x2_t __ret; \
  56299. __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  56300. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  56301. __ret; \
  56302. })
  56303. #endif
  56304. #ifdef __LITTLE_ENDIAN__
  56305. #define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56306. int32x4_t __s0 = __p0; \
  56307. int16x4_t __s1 = __p1; \
  56308. int16x8_t __s2 = __p2; \
  56309. int32x4_t __ret; \
  56310. __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  56311. __ret; \
  56312. })
  56313. #else
  56314. #define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  56315. int32x4_t __s0 = __p0; \
  56316. int16x4_t __s1 = __p1; \
  56317. int16x8_t __s2 = __p2; \
  56318. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56319. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56320. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
  56321. int32x4_t __ret; \
  56322. __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  56323. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56324. __ret; \
  56325. })
  56326. #endif
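/*
 * Illustrative usage sketch, not part of the generated header: _lane_ selects
 * from a 64-bit vector and _laneq_ from a 128-bit vector, so for 16-bit
 * elements the valid index range widens from 0..3 to 0..7. The helper name
 * and chosen lane are hypothetical.
 */
__ai int32x4_t __example_vqdmlsl_laneq(int32x4_t __acc, int16x4_t __a, int16x8_t __coeffs) {
  /* Multiply each lane of __a by lane 5 of __coeffs, double, saturate, subtract from __acc. */
  return vqdmlsl_laneq_s16(__acc, __a, __coeffs, 5);
}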
  56327. #ifdef __LITTLE_ENDIAN__
  56328. __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
  56329. int32_t __ret;
  56330. __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
  56331. return __ret;
  56332. }
  56333. #else
  56334. __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
  56335. int32_t __ret;
  56336. __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
  56337. return __ret;
  56338. }
  56339. __ai int32_t __noswap_vqdmulhs_s32(int32_t __p0, int32_t __p1) {
  56340. int32_t __ret;
  56341. __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
  56342. return __ret;
  56343. }
  56344. #endif
  56345. #ifdef __LITTLE_ENDIAN__
  56346. __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
  56347. int16_t __ret;
  56348. __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
  56349. return __ret;
  56350. }
  56351. #else
  56352. __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
  56353. int16_t __ret;
  56354. __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
  56355. return __ret;
  56356. }
  56357. __ai int16_t __noswap_vqdmulhh_s16(int16_t __p0, int16_t __p1) {
  56358. int16_t __ret;
  56359. __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
  56360. return __ret;
  56361. }
  56362. #endif
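/*
 * Illustrative usage sketch, not part of the generated header: vqdmulh*
 * returns the high half of the doubled product, roughly (2 * a * b) >> 32
 * for the 32-bit form, saturating only for INT32_MIN * INT32_MIN. This is
 * the usual Q31 fixed-point multiply. The helper name is hypothetical.
 */
__ai int32_t __example_q31_mul(int32_t __a_q31, int32_t __b_q31) {
  /* Product of two Q31 fixed-point values, result still in Q31. */
  return vqdmulhs_s32(__a_q31, __b_q31);
}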
  56363. #ifdef __LITTLE_ENDIAN__
  56364. #define vqdmulhs_lane_s32(__p0_158, __p1_158, __p2_158) __extension__ ({ \
  56365. int32_t __s0_158 = __p0_158; \
  56366. int32x2_t __s1_158 = __p1_158; \
  56367. int32_t __ret_158; \
  56368. __ret_158 = vqdmulhs_s32(__s0_158, vget_lane_s32(__s1_158, __p2_158)); \
  56369. __ret_158; \
  56370. })
  56371. #else
  56372. #define vqdmulhs_lane_s32(__p0_159, __p1_159, __p2_159) __extension__ ({ \
  56373. int32_t __s0_159 = __p0_159; \
  56374. int32x2_t __s1_159 = __p1_159; \
  56375. int32x2_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 1, 0); \
  56376. int32_t __ret_159; \
  56377. __ret_159 = __noswap_vqdmulhs_s32(__s0_159, __noswap_vget_lane_s32(__rev1_159, __p2_159)); \
  56378. __ret_159; \
  56379. })
  56380. #endif
  56381. #ifdef __LITTLE_ENDIAN__
  56382. #define vqdmulhh_lane_s16(__p0_160, __p1_160, __p2_160) __extension__ ({ \
  56383. int16_t __s0_160 = __p0_160; \
  56384. int16x4_t __s1_160 = __p1_160; \
  56385. int16_t __ret_160; \
  56386. __ret_160 = vqdmulhh_s16(__s0_160, vget_lane_s16(__s1_160, __p2_160)); \
  56387. __ret_160; \
  56388. })
  56389. #else
  56390. #define vqdmulhh_lane_s16(__p0_161, __p1_161, __p2_161) __extension__ ({ \
  56391. int16_t __s0_161 = __p0_161; \
  56392. int16x4_t __s1_161 = __p1_161; \
  56393. int16x4_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 3, 2, 1, 0); \
  56394. int16_t __ret_161; \
  56395. __ret_161 = __noswap_vqdmulhh_s16(__s0_161, __noswap_vget_lane_s16(__rev1_161, __p2_161)); \
  56396. __ret_161; \
  56397. })
  56398. #endif
  56399. #ifdef __LITTLE_ENDIAN__
  56400. #define vqdmulhs_laneq_s32(__p0_162, __p1_162, __p2_162) __extension__ ({ \
  56401. int32_t __s0_162 = __p0_162; \
  56402. int32x4_t __s1_162 = __p1_162; \
  56403. int32_t __ret_162; \
  56404. __ret_162 = vqdmulhs_s32(__s0_162, vgetq_lane_s32(__s1_162, __p2_162)); \
  56405. __ret_162; \
  56406. })
  56407. #else
  56408. #define vqdmulhs_laneq_s32(__p0_163, __p1_163, __p2_163) __extension__ ({ \
  56409. int32_t __s0_163 = __p0_163; \
  56410. int32x4_t __s1_163 = __p1_163; \
  56411. int32x4_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \
  56412. int32_t __ret_163; \
  56413. __ret_163 = __noswap_vqdmulhs_s32(__s0_163, __noswap_vgetq_lane_s32(__rev1_163, __p2_163)); \
  56414. __ret_163; \
  56415. })
  56416. #endif
  56417. #ifdef __LITTLE_ENDIAN__
  56418. #define vqdmulhh_laneq_s16(__p0_164, __p1_164, __p2_164) __extension__ ({ \
  56419. int16_t __s0_164 = __p0_164; \
  56420. int16x8_t __s1_164 = __p1_164; \
  56421. int16_t __ret_164; \
  56422. __ret_164 = vqdmulhh_s16(__s0_164, vgetq_lane_s16(__s1_164, __p2_164)); \
  56423. __ret_164; \
  56424. })
  56425. #else
  56426. #define vqdmulhh_laneq_s16(__p0_165, __p1_165, __p2_165) __extension__ ({ \
  56427. int16_t __s0_165 = __p0_165; \
  56428. int16x8_t __s1_165 = __p1_165; \
  56429. int16x8_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 7, 6, 5, 4, 3, 2, 1, 0); \
  56430. int16_t __ret_165; \
  56431. __ret_165 = __noswap_vqdmulhh_s16(__s0_165, __noswap_vgetq_lane_s16(__rev1_165, __p2_165)); \
  56432. __ret_165; \
  56433. })
  56434. #endif
  56435. #ifdef __LITTLE_ENDIAN__
  56436. #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  56437. int32x4_t __s0 = __p0; \
  56438. int32x4_t __s1 = __p1; \
  56439. int32x4_t __ret; \
  56440. __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  56441. __ret; \
  56442. })
  56443. #else
  56444. #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  56445. int32x4_t __s0 = __p0; \
  56446. int32x4_t __s1 = __p1; \
  56447. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56448. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56449. int32x4_t __ret; \
  56450. __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  56451. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56452. __ret; \
  56453. })
  56454. #endif
  56455. #ifdef __LITTLE_ENDIAN__
  56456. #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  56457. int16x8_t __s0 = __p0; \
  56458. int16x8_t __s1 = __p1; \
  56459. int16x8_t __ret; \
  56460. __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  56461. __ret; \
  56462. })
  56463. #else
  56464. #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  56465. int16x8_t __s0 = __p0; \
  56466. int16x8_t __s1 = __p1; \
  56467. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  56468. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  56469. int16x8_t __ret; \
  56470. __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  56471. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  56472. __ret; \
  56473. })
  56474. #endif
  56475. #ifdef __LITTLE_ENDIAN__
  56476. #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  56477. int32x2_t __s0 = __p0; \
  56478. int32x4_t __s1 = __p1; \
  56479. int32x2_t __ret; \
  56480. __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  56481. __ret; \
  56482. })
  56483. #else
  56484. #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  56485. int32x2_t __s0 = __p0; \
  56486. int32x4_t __s1 = __p1; \
  56487. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  56488. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56489. int32x2_t __ret; \
  56490. __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  56491. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  56492. __ret; \
  56493. })
  56494. #endif
  56495. #ifdef __LITTLE_ENDIAN__
  56496. #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  56497. int16x4_t __s0 = __p0; \
  56498. int16x8_t __s1 = __p1; \
  56499. int16x4_t __ret; \
  56500. __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  56501. __ret; \
  56502. })
  56503. #else
  56504. #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  56505. int16x4_t __s0 = __p0; \
  56506. int16x8_t __s1 = __p1; \
  56507. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56508. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  56509. int16x4_t __ret; \
  56510. __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  56511. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56512. __ret; \
  56513. })
  56514. #endif
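/*
 * Illustrative usage sketch, not part of the generated header: the
 * vqdmulh[q]_laneq_* macros above multiply a whole vector by one lane of a
 * 128-bit vector, a common shape for filter kernels that keep all
 * coefficients in a single register. The helper name and lane index are
 * hypothetical.
 */
__ai int16x8_t __example_vqdmulhq_by_coeff(int16x8_t __x, int16x8_t __coeffs) {
  /* Q15 multiply of every lane of __x by coefficient lane 2. */
  return vqdmulhq_laneq_s16(__x, __coeffs, 2);
}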
  56515. #ifdef __LITTLE_ENDIAN__
  56516. __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
  56517. int64_t __ret;
  56518. __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
  56519. return __ret;
  56520. }
  56521. #else
  56522. __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
  56523. int64_t __ret;
  56524. __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
  56525. return __ret;
  56526. }
  56527. __ai int64_t __noswap_vqdmulls_s32(int32_t __p0, int32_t __p1) {
  56528. int64_t __ret;
  56529. __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
  56530. return __ret;
  56531. }
  56532. #endif
  56533. #ifdef __LITTLE_ENDIAN__
  56534. __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
  56535. int32_t __ret;
  56536. __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
  56537. return __ret;
  56538. }
  56539. #else
  56540. __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
  56541. int32_t __ret;
  56542. __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
  56543. return __ret;
  56544. }
  56545. __ai int32_t __noswap_vqdmullh_s16(int16_t __p0, int16_t __p1) {
  56546. int32_t __ret;
  56547. __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
  56548. return __ret;
  56549. }
  56550. #endif
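/*
 * Illustrative usage sketch, not part of the generated header: vqdmull*
 * widens, so the only inputs that can saturate are INT32_MIN * INT32_MIN
 * (resp. INT16_MIN * INT16_MIN), whose doubled product needs one more bit
 * than the result type has. The helper name and constants are only an
 * example.
 */
__ai int64_t __example_vqdmulls(void) {
  /* 2 * 40000 * 50000 = 4000000000, comfortably inside int64_t. */
  return vqdmulls_s32(40000, 50000);
}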
  56551. #ifdef __LITTLE_ENDIAN__
  56552. __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
  56553. int64x2_t __ret;
  56554. __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
  56555. return __ret;
  56556. }
  56557. #else
  56558. __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
  56559. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  56560. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  56561. int64x2_t __ret;
  56562. __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
  56563. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  56564. return __ret;
  56565. }
  56566. #endif
  56567. #ifdef __LITTLE_ENDIAN__
  56568. __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
  56569. int32x4_t __ret;
  56570. __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
  56571. return __ret;
  56572. }
  56573. #else
  56574. __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
  56575. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  56576. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  56577. int32x4_t __ret;
  56578. __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
  56579. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  56580. return __ret;
  56581. }
  56582. #endif
  56583. #ifdef __LITTLE_ENDIAN__
  56584. #define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  56585. int32x4_t __s0 = __p0; \
  56586. int32x2_t __s1 = __p1; \
  56587. int64x2_t __ret; \
  56588. __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  56589. __ret; \
  56590. })
  56591. #else
  56592. #define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
  56593. int32x4_t __s0 = __p0; \
  56594. int32x2_t __s1 = __p1; \
  56595. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56596. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  56597. int64x2_t __ret; \
  56598. __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  56599. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  56600. __ret; \
  56601. })
  56602. #endif
  56603. #ifdef __LITTLE_ENDIAN__
  56604. #define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  56605. int16x8_t __s0 = __p0; \
  56606. int16x4_t __s1 = __p1; \
  56607. int32x4_t __ret; \
  56608. __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  56609. __ret; \
  56610. })
  56611. #else
  56612. #define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
  56613. int16x8_t __s0 = __p0; \
  56614. int16x4_t __s1 = __p1; \
  56615. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  56616. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56617. int32x4_t __ret; \
  56618. __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  56619. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56620. __ret; \
  56621. })
  56622. #endif
  56623. #ifdef __LITTLE_ENDIAN__
  56624. #define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  56625. int32x4_t __s0 = __p0; \
  56626. int32x4_t __s1 = __p1; \
  56627. int64x2_t __ret; \
  56628. __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  56629. __ret; \
  56630. })
  56631. #else
  56632. #define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  56633. int32x4_t __s0 = __p0; \
  56634. int32x4_t __s1 = __p1; \
  56635. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56636. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56637. int64x2_t __ret; \
  56638. __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  56639. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  56640. __ret; \
  56641. })
  56642. #endif
  56643. #ifdef __LITTLE_ENDIAN__
  56644. #define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  56645. int16x8_t __s0 = __p0; \
  56646. int16x8_t __s1 = __p1; \
  56647. int32x4_t __ret; \
  56648. __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  56649. __ret; \
  56650. })
  56651. #else
  56652. #define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  56653. int16x8_t __s0 = __p0; \
  56654. int16x8_t __s1 = __p1; \
  56655. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  56656. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  56657. int32x4_t __ret; \
  56658. __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  56659. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56660. __ret; \
  56661. })
  56662. #endif
  56663. #ifdef __LITTLE_ENDIAN__
  56664. __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
  56665. int64x2_t __ret;
  56666. __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
  56667. return __ret;
  56668. }
  56669. #else
  56670. __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
  56671. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  56672. int64x2_t __ret;
  56673. __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
  56674. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  56675. return __ret;
  56676. }
  56677. #endif
  56678. #ifdef __LITTLE_ENDIAN__
  56679. __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
  56680. int32x4_t __ret;
  56681. __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
  56682. return __ret;
  56683. }
  56684. #else
  56685. __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
  56686. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  56687. int32x4_t __ret;
  56688. __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
  56689. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  56690. return __ret;
  56691. }
  56692. #endif
  56693. #ifdef __LITTLE_ENDIAN__
  56694. #define vqdmulls_lane_s32(__p0_166, __p1_166, __p2_166) __extension__ ({ \
  56695. int32_t __s0_166 = __p0_166; \
  56696. int32x2_t __s1_166 = __p1_166; \
  56697. int64_t __ret_166; \
  56698. __ret_166 = vqdmulls_s32(__s0_166, vget_lane_s32(__s1_166, __p2_166)); \
  56699. __ret_166; \
  56700. })
  56701. #else
  56702. #define vqdmulls_lane_s32(__p0_167, __p1_167, __p2_167) __extension__ ({ \
  56703. int32_t __s0_167 = __p0_167; \
  56704. int32x2_t __s1_167 = __p1_167; \
  56705. int32x2_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 1, 0); \
  56706. int64_t __ret_167; \
  56707. __ret_167 = __noswap_vqdmulls_s32(__s0_167, __noswap_vget_lane_s32(__rev1_167, __p2_167)); \
  56708. __ret_167; \
  56709. })
  56710. #endif
  56711. #ifdef __LITTLE_ENDIAN__
  56712. #define vqdmullh_lane_s16(__p0_168, __p1_168, __p2_168) __extension__ ({ \
  56713. int16_t __s0_168 = __p0_168; \
  56714. int16x4_t __s1_168 = __p1_168; \
  56715. int32_t __ret_168; \
  56716. __ret_168 = vqdmullh_s16(__s0_168, vget_lane_s16(__s1_168, __p2_168)); \
  56717. __ret_168; \
  56718. })
  56719. #else
  56720. #define vqdmullh_lane_s16(__p0_169, __p1_169, __p2_169) __extension__ ({ \
  56721. int16_t __s0_169 = __p0_169; \
  56722. int16x4_t __s1_169 = __p1_169; \
  56723. int16x4_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 3, 2, 1, 0); \
  56724. int32_t __ret_169; \
  56725. __ret_169 = __noswap_vqdmullh_s16(__s0_169, __noswap_vget_lane_s16(__rev1_169, __p2_169)); \
  56726. __ret_169; \
  56727. })
  56728. #endif
  56729. #ifdef __LITTLE_ENDIAN__
  56730. #define vqdmulls_laneq_s32(__p0_170, __p1_170, __p2_170) __extension__ ({ \
  56731. int32_t __s0_170 = __p0_170; \
  56732. int32x4_t __s1_170 = __p1_170; \
  56733. int64_t __ret_170; \
  56734. __ret_170 = vqdmulls_s32(__s0_170, vgetq_lane_s32(__s1_170, __p2_170)); \
  56735. __ret_170; \
  56736. })
  56737. #else
  56738. #define vqdmulls_laneq_s32(__p0_171, __p1_171, __p2_171) __extension__ ({ \
  56739. int32_t __s0_171 = __p0_171; \
  56740. int32x4_t __s1_171 = __p1_171; \
  56741. int32x4_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \
  56742. int64_t __ret_171; \
  56743. __ret_171 = __noswap_vqdmulls_s32(__s0_171, __noswap_vgetq_lane_s32(__rev1_171, __p2_171)); \
  56744. __ret_171; \
  56745. })
  56746. #endif
  56747. #ifdef __LITTLE_ENDIAN__
  56748. #define vqdmullh_laneq_s16(__p0_172, __p1_172, __p2_172) __extension__ ({ \
  56749. int16_t __s0_172 = __p0_172; \
  56750. int16x8_t __s1_172 = __p1_172; \
  56751. int32_t __ret_172; \
  56752. __ret_172 = vqdmullh_s16(__s0_172, vgetq_lane_s16(__s1_172, __p2_172)); \
  56753. __ret_172; \
  56754. })
  56755. #else
  56756. #define vqdmullh_laneq_s16(__p0_173, __p1_173, __p2_173) __extension__ ({ \
  56757. int16_t __s0_173 = __p0_173; \
  56758. int16x8_t __s1_173 = __p1_173; \
  56759. int16x8_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 7, 6, 5, 4, 3, 2, 1, 0); \
  56760. int32_t __ret_173; \
  56761. __ret_173 = __noswap_vqdmullh_s16(__s0_173, __noswap_vgetq_lane_s16(__rev1_173, __p2_173)); \
  56762. __ret_173; \
  56763. })
  56764. #endif
  56765. #ifdef __LITTLE_ENDIAN__
  56766. #define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  56767. int32x2_t __s0 = __p0; \
  56768. int32x4_t __s1 = __p1; \
  56769. int64x2_t __ret; \
  56770. __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  56771. __ret; \
  56772. })
  56773. #else
  56774. #define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  56775. int32x2_t __s0 = __p0; \
  56776. int32x4_t __s1 = __p1; \
  56777. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  56778. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  56779. int64x2_t __ret; \
  56780. __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  56781. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  56782. __ret; \
  56783. })
  56784. #endif
  56785. #ifdef __LITTLE_ENDIAN__
  56786. #define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  56787. int16x4_t __s0 = __p0; \
  56788. int16x8_t __s1 = __p1; \
  56789. int32x4_t __ret; \
  56790. __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  56791. __ret; \
  56792. })
  56793. #else
  56794. #define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  56795. int16x4_t __s0 = __p0; \
  56796. int16x8_t __s1 = __p1; \
  56797. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  56798. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  56799. int32x4_t __ret; \
  56800. __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  56801. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  56802. __ret; \
  56803. })
  56804. #endif
  56805. #ifdef __LITTLE_ENDIAN__
  56806. __ai int16_t vqmovns_s32(int32_t __p0) {
  56807. int16_t __ret;
  56808. __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
  56809. return __ret;
  56810. }
  56811. #else
  56812. __ai int16_t vqmovns_s32(int32_t __p0) {
  56813. int16_t __ret;
  56814. __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
  56815. return __ret;
  56816. }
  56817. #endif
  56818. #ifdef __LITTLE_ENDIAN__
  56819. __ai int32_t vqmovnd_s64(int64_t __p0) {
  56820. int32_t __ret;
  56821. __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
  56822. return __ret;
  56823. }
  56824. #else
  56825. __ai int32_t vqmovnd_s64(int64_t __p0) {
  56826. int32_t __ret;
  56827. __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
  56828. return __ret;
  56829. }
  56830. #endif
  56831. #ifdef __LITTLE_ENDIAN__
  56832. __ai int8_t vqmovnh_s16(int16_t __p0) {
  56833. int8_t __ret;
  56834. __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
  56835. return __ret;
  56836. }
  56837. #else
  56838. __ai int8_t vqmovnh_s16(int16_t __p0) {
  56839. int8_t __ret;
  56840. __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
  56841. return __ret;
  56842. }
  56843. #endif
  56844. #ifdef __LITTLE_ENDIAN__
  56845. __ai uint16_t vqmovns_u32(uint32_t __p0) {
  56846. uint16_t __ret;
  56847. __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
  56848. return __ret;
  56849. }
  56850. #else
  56851. __ai uint16_t vqmovns_u32(uint32_t __p0) {
  56852. uint16_t __ret;
  56853. __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
  56854. return __ret;
  56855. }
  56856. #endif
  56857. #ifdef __LITTLE_ENDIAN__
  56858. __ai uint32_t vqmovnd_u64(uint64_t __p0) {
  56859. uint32_t __ret;
  56860. __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
  56861. return __ret;
  56862. }
  56863. #else
  56864. __ai uint32_t vqmovnd_u64(uint64_t __p0) {
  56865. uint32_t __ret;
  56866. __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
  56867. return __ret;
  56868. }
  56869. #endif
  56870. #ifdef __LITTLE_ENDIAN__
  56871. __ai uint8_t vqmovnh_u16(uint16_t __p0) {
  56872. uint8_t __ret;
  56873. __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
  56874. return __ret;
  56875. }
  56876. #else
  56877. __ai uint8_t vqmovnh_u16(uint16_t __p0) {
  56878. uint8_t __ret;
  56879. __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
  56880. return __ret;
  56881. }
  56882. #endif
  56883. #ifdef __LITTLE_ENDIAN__
  56884. __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
  56885. uint16x8_t __ret;
  56886. __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
  56887. return __ret;
  56888. }
  56889. #else
  56890. __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
  56891. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  56892. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  56893. uint16x8_t __ret;
  56894. __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
  56895. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  56896. return __ret;
  56897. }
  56898. #endif
  56899. #ifdef __LITTLE_ENDIAN__
  56900. __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
  56901. uint32x4_t __ret;
  56902. __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
  56903. return __ret;
  56904. }
  56905. #else
  56906. __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
  56907. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  56908. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  56909. uint32x4_t __ret;
  56910. __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
  56911. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  56912. return __ret;
  56913. }
  56914. #endif
  56915. #ifdef __LITTLE_ENDIAN__
  56916. __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
  56917. uint8x16_t __ret;
  56918. __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
  56919. return __ret;
  56920. }
  56921. #else
  56922. __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
  56923. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  56924. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  56925. uint8x16_t __ret;
  56926. __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
  56927. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  56928. return __ret;
  56929. }
  56930. #endif
  56931. #ifdef __LITTLE_ENDIAN__
  56932. __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
  56933. int16x8_t __ret;
  56934. __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
  56935. return __ret;
  56936. }
  56937. #else
  56938. __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
  56939. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  56940. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  56941. int16x8_t __ret;
  56942. __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
  56943. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  56944. return __ret;
  56945. }
  56946. #endif
  56947. #ifdef __LITTLE_ENDIAN__
  56948. __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
  56949. int32x4_t __ret;
  56950. __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
  56951. return __ret;
  56952. }
  56953. #else
  56954. __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
  56955. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  56956. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  56957. int32x4_t __ret;
  56958. __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
  56959. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  56960. return __ret;
  56961. }
  56962. #endif
  56963. #ifdef __LITTLE_ENDIAN__
  56964. __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
  56965. int8x16_t __ret;
  56966. __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
  56967. return __ret;
  56968. }
  56969. #else
  56970. __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
  56971. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  56972. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  56973. int8x16_t __ret;
  56974. __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
  56975. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  56976. return __ret;
  56977. }
  56978. #endif
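/*
 * Illustrative usage sketch, not part of the generated header: the
 * vqmovn_high_* forms above splice a freshly narrowed high half next to a
 * previously narrowed low half, which is the usual way to saturating-narrow
 * two 128-bit vectors into one. The helper name is hypothetical.
 */
__ai int16x8_t __example_narrow_two_s32(int32x4_t __lo, int32x4_t __hi) {
  /* Narrow __lo into the low lanes, then __hi into the high lanes. */
  return vqmovn_high_s32(vqmovn_s32(__lo), __hi);
}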
  56979. #ifdef __LITTLE_ENDIAN__
  56980. __ai int16_t vqmovuns_s32(int32_t __p0) {
  56981. int16_t __ret;
  56982. __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
  56983. return __ret;
  56984. }
  56985. #else
  56986. __ai int16_t vqmovuns_s32(int32_t __p0) {
  56987. int16_t __ret;
  56988. __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
  56989. return __ret;
  56990. }
  56991. #endif
  56992. #ifdef __LITTLE_ENDIAN__
  56993. __ai int32_t vqmovund_s64(int64_t __p0) {
  56994. int32_t __ret;
  56995. __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
  56996. return __ret;
  56997. }
  56998. #else
  56999. __ai int32_t vqmovund_s64(int64_t __p0) {
  57000. int32_t __ret;
  57001. __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
  57002. return __ret;
  57003. }
  57004. #endif
  57005. #ifdef __LITTLE_ENDIAN__
  57006. __ai int8_t vqmovunh_s16(int16_t __p0) {
  57007. int8_t __ret;
  57008. __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
  57009. return __ret;
  57010. }
  57011. #else
  57012. __ai int8_t vqmovunh_s16(int16_t __p0) {
  57013. int8_t __ret;
  57014. __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
  57015. return __ret;
  57016. }
  57017. #endif
  57018. #ifdef __LITTLE_ENDIAN__
  57019. __ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
  57020. uint16x8_t __ret;
  57021. __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
  57022. return __ret;
  57023. }
  57024. #else
  57025. __ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
  57026. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  57027. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  57028. uint16x8_t __ret;
  57029. __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
  57030. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  57031. return __ret;
  57032. }
  57033. #endif
  57034. #ifdef __LITTLE_ENDIAN__
  57035. __ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
  57036. uint32x4_t __ret;
  57037. __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
  57038. return __ret;
  57039. }
  57040. #else
  57041. __ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
  57042. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  57043. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  57044. uint32x4_t __ret;
  57045. __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
  57046. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  57047. return __ret;
  57048. }
  57049. #endif
  57050. #ifdef __LITTLE_ENDIAN__
  57051. __ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
  57052. uint8x16_t __ret;
  57053. __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
  57054. return __ret;
  57055. }
  57056. #else
  57057. __ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
  57058. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  57059. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  57060. uint8x16_t __ret;
  57061. __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
  57062. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  57063. return __ret;
  57064. }
  57065. #endif
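/*
 * Illustrative usage sketch, not part of the generated header: vqmovun*
 * narrows signed input to an unsigned result, clamping negative lanes to 0
 * and lanes above the unsigned maximum to that maximum. Combining two
 * narrowed halves as below is expected to match the _high forms above.
 * The helper name is hypothetical.
 */
__ai uint8x16_t __example_clamp_s16_to_u8(int16x8_t __lo, int16x8_t __hi) {
  /* Clamp two int16x8_t vectors into one uint8x16_t, e.g. for pixel output. */
  return vcombine_u8(vqmovun_s16(__lo), vqmovun_s16(__hi));
}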
  57066. #ifdef __LITTLE_ENDIAN__
  57067. __ai int64x2_t vqnegq_s64(int64x2_t __p0) {
  57068. int64x2_t __ret;
  57069. __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
  57070. return __ret;
  57071. }
  57072. #else
  57073. __ai int64x2_t vqnegq_s64(int64x2_t __p0) {
  57074. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  57075. int64x2_t __ret;
  57076. __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
  57077. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  57078. return __ret;
  57079. }
  57080. #endif
  57081. #ifdef __LITTLE_ENDIAN__
  57082. __ai int64x1_t vqneg_s64(int64x1_t __p0) {
  57083. int64x1_t __ret;
  57084. __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
  57085. return __ret;
  57086. }
  57087. #else
  57088. __ai int64x1_t vqneg_s64(int64x1_t __p0) {
  57089. int64x1_t __ret;
  57090. __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
  57091. return __ret;
  57092. }
  57093. #endif
  57094. #ifdef __LITTLE_ENDIAN__
  57095. __ai int8_t vqnegb_s8(int8_t __p0) {
  57096. int8_t __ret;
  57097. __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
  57098. return __ret;
  57099. }
  57100. #else
  57101. __ai int8_t vqnegb_s8(int8_t __p0) {
  57102. int8_t __ret;
  57103. __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
  57104. return __ret;
  57105. }
  57106. #endif
  57107. #ifdef __LITTLE_ENDIAN__
  57108. __ai int32_t vqnegs_s32(int32_t __p0) {
  57109. int32_t __ret;
  57110. __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
  57111. return __ret;
  57112. }
  57113. #else
  57114. __ai int32_t vqnegs_s32(int32_t __p0) {
  57115. int32_t __ret;
  57116. __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
  57117. return __ret;
  57118. }
  57119. #endif
  57120. #ifdef __LITTLE_ENDIAN__
  57121. __ai int64_t vqnegd_s64(int64_t __p0) {
  57122. int64_t __ret;
  57123. __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
  57124. return __ret;
  57125. }
  57126. #else
  57127. __ai int64_t vqnegd_s64(int64_t __p0) {
  57128. int64_t __ret;
  57129. __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
  57130. return __ret;
  57131. }
  57132. #endif
  57133. #ifdef __LITTLE_ENDIAN__
  57134. __ai int16_t vqnegh_s16(int16_t __p0) {
  57135. int16_t __ret;
  57136. __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
  57137. return __ret;
  57138. }
  57139. #else
  57140. __ai int16_t vqnegh_s16(int16_t __p0) {
  57141. int16_t __ret;
  57142. __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
  57143. return __ret;
  57144. }
  57145. #endif
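/*
 * vqnegb_s8 / vqnegh_s16 / vqnegs_s32 / vqnegd_s64 negate a single scalar
 * with saturation; the only input that actually saturates is the type
 * minimum, e.g. vqnegb_s8(-128) == 127 and vqnegd_s64(INT64_MIN) ==
 * INT64_MAX, while vqnegb_s8(5) == -5 as usual.
 */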
  57146. #ifdef __LITTLE_ENDIAN__
  57147. __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
  57148. int32_t __ret;
  57149. __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
  57150. return __ret;
  57151. }
  57152. #else
  57153. __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
  57154. int32_t __ret;
  57155. __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
  57156. return __ret;
  57157. }
  57158. __ai int32_t __noswap_vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
  57159. int32_t __ret;
  57160. __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
  57161. return __ret;
  57162. }
  57163. #endif
  57164. #ifdef __LITTLE_ENDIAN__
  57165. __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
  57166. int16_t __ret;
  57167. __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
  57168. return __ret;
  57169. }
  57170. #else
  57171. __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
  57172. int16_t __ret;
  57173. __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
  57174. return __ret;
  57175. }
  57176. __ai int16_t __noswap_vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
  57177. int16_t __ret;
  57178. __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
  57179. return __ret;
  57180. }
  57181. #endif
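/*
 * vqrdmulhh_s16 / vqrdmulhs_s32 compute the saturating rounding doubling
 * multiply returning the high half: roughly (2*a*b + (1 << (N-1))) >> N
 * for an N-bit element, clamped to the signed range.  For example,
 * vqrdmulhh_s16(16384, 16384) == 8192, and the only saturating case is
 * a == b == INT16_MIN (resp. INT32_MIN), which yields INT16_MAX
 * (resp. INT32_MAX).  The __noswap_ aliases are referenced by the
 * big-endian expansions of the lane forms below.
 */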
  57182. #ifdef __LITTLE_ENDIAN__
  57183. #define vqrdmulhs_lane_s32(__p0_174, __p1_174, __p2_174) __extension__ ({ \
  57184. int32_t __s0_174 = __p0_174; \
  57185. int32x2_t __s1_174 = __p1_174; \
  57186. int32_t __ret_174; \
  57187. __ret_174 = vqrdmulhs_s32(__s0_174, vget_lane_s32(__s1_174, __p2_174)); \
  57188. __ret_174; \
  57189. })
  57190. #else
  57191. #define vqrdmulhs_lane_s32(__p0_175, __p1_175, __p2_175) __extension__ ({ \
  57192. int32_t __s0_175 = __p0_175; \
  57193. int32x2_t __s1_175 = __p1_175; \
  57194. int32x2_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 1, 0); \
  57195. int32_t __ret_175; \
  57196. __ret_175 = __noswap_vqrdmulhs_s32(__s0_175, __noswap_vget_lane_s32(__rev1_175, __p2_175)); \
  57197. __ret_175; \
  57198. })
  57199. #endif
  57200. #ifdef __LITTLE_ENDIAN__
  57201. #define vqrdmulhh_lane_s16(__p0_176, __p1_176, __p2_176) __extension__ ({ \
  57202. int16_t __s0_176 = __p0_176; \
  57203. int16x4_t __s1_176 = __p1_176; \
  57204. int16_t __ret_176; \
  57205. __ret_176 = vqrdmulhh_s16(__s0_176, vget_lane_s16(__s1_176, __p2_176)); \
  57206. __ret_176; \
  57207. })
  57208. #else
  57209. #define vqrdmulhh_lane_s16(__p0_177, __p1_177, __p2_177) __extension__ ({ \
  57210. int16_t __s0_177 = __p0_177; \
  57211. int16x4_t __s1_177 = __p1_177; \
  57212. int16x4_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 3, 2, 1, 0); \
  57213. int16_t __ret_177; \
  57214. __ret_177 = __noswap_vqrdmulhh_s16(__s0_177, __noswap_vget_lane_s16(__rev1_177, __p2_177)); \
  57215. __ret_177; \
  57216. })
  57217. #endif
  57218. #ifdef __LITTLE_ENDIAN__
  57219. #define vqrdmulhs_laneq_s32(__p0_178, __p1_178, __p2_178) __extension__ ({ \
  57220. int32_t __s0_178 = __p0_178; \
  57221. int32x4_t __s1_178 = __p1_178; \
  57222. int32_t __ret_178; \
  57223. __ret_178 = vqrdmulhs_s32(__s0_178, vgetq_lane_s32(__s1_178, __p2_178)); \
  57224. __ret_178; \
  57225. })
  57226. #else
  57227. #define vqrdmulhs_laneq_s32(__p0_179, __p1_179, __p2_179) __extension__ ({ \
  57228. int32_t __s0_179 = __p0_179; \
  57229. int32x4_t __s1_179 = __p1_179; \
  57230. int32x4_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 3, 2, 1, 0); \
  57231. int32_t __ret_179; \
  57232. __ret_179 = __noswap_vqrdmulhs_s32(__s0_179, __noswap_vgetq_lane_s32(__rev1_179, __p2_179)); \
  57233. __ret_179; \
  57234. })
  57235. #endif
  57236. #ifdef __LITTLE_ENDIAN__
  57237. #define vqrdmulhh_laneq_s16(__p0_180, __p1_180, __p2_180) __extension__ ({ \
  57238. int16_t __s0_180 = __p0_180; \
  57239. int16x8_t __s1_180 = __p1_180; \
  57240. int16_t __ret_180; \
  57241. __ret_180 = vqrdmulhh_s16(__s0_180, vgetq_lane_s16(__s1_180, __p2_180)); \
  57242. __ret_180; \
  57243. })
  57244. #else
  57245. #define vqrdmulhh_laneq_s16(__p0_181, __p1_181, __p2_181) __extension__ ({ \
  57246. int16_t __s0_181 = __p0_181; \
  57247. int16x8_t __s1_181 = __p1_181; \
  57248. int16x8_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \
  57249. int16_t __ret_181; \
  57250. __ret_181 = __noswap_vqrdmulhh_s16(__s0_181, __noswap_vgetq_lane_s16(__rev1_181, __p2_181)); \
  57251. __ret_181; \
  57252. })
  57253. #endif
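/*
 * The scalar-by-lane macros above extract lane __p2 (a compile-time
 * constant) with vget_lane/vgetq_lane and feed it to the scalar
 * intrinsic; the vector forms that follow instead splat lane __p2 across
 * every lane with __builtin_shufflevector and then apply the ordinary
 * vqrdmulh(q).
 */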
  57254. #ifdef __LITTLE_ENDIAN__
  57255. #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  57256. int32x4_t __s0 = __p0; \
  57257. int32x4_t __s1 = __p1; \
  57258. int32x4_t __ret; \
  57259. __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  57260. __ret; \
  57261. })
  57262. #else
  57263. #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  57264. int32x4_t __s0 = __p0; \
  57265. int32x4_t __s1 = __p1; \
  57266. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  57267. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  57268. int32x4_t __ret; \
  57269. __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  57270. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  57271. __ret; \
  57272. })
  57273. #endif
  57274. #ifdef __LITTLE_ENDIAN__
  57275. #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  57276. int16x8_t __s0 = __p0; \
  57277. int16x8_t __s1 = __p1; \
  57278. int16x8_t __ret; \
  57279. __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  57280. __ret; \
  57281. })
  57282. #else
  57283. #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  57284. int16x8_t __s0 = __p0; \
  57285. int16x8_t __s1 = __p1; \
  57286. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
  57287. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  57288. int16x8_t __ret; \
  57289. __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
  57290. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
  57291. __ret; \
  57292. })
  57293. #endif
  57294. #ifdef __LITTLE_ENDIAN__
  57295. #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  57296. int32x2_t __s0 = __p0; \
  57297. int32x4_t __s1 = __p1; \
  57298. int32x2_t __ret; \
  57299. __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
  57300. __ret; \
  57301. })
  57302. #else
  57303. #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
  57304. int32x2_t __s0 = __p0; \
  57305. int32x4_t __s1 = __p1; \
  57306. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  57307. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  57308. int32x2_t __ret; \
  57309. __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
  57310. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  57311. __ret; \
  57312. })
  57313. #endif
  57314. #ifdef __LITTLE_ENDIAN__
  57315. #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  57316. int16x4_t __s0 = __p0; \
  57317. int16x8_t __s1 = __p1; \
  57318. int16x4_t __ret; \
  57319. __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
  57320. __ret; \
  57321. })
  57322. #else
  57323. #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
  57324. int16x4_t __s0 = __p0; \
  57325. int16x8_t __s1 = __p1; \
  57326. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  57327. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
  57328. int16x4_t __ret; \
  57329. __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
  57330. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  57331. __ret; \
  57332. })
  57333. #endif
  57334. #ifdef __LITTLE_ENDIAN__
  57335. __ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
  57336. uint8_t __ret;
  57337. __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
  57338. return __ret;
  57339. }
  57340. #else
  57341. __ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
  57342. uint8_t __ret;
  57343. __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
  57344. return __ret;
  57345. }
  57346. #endif
  57347. #ifdef __LITTLE_ENDIAN__
  57348. __ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
  57349. uint32_t __ret;
  57350. __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
  57351. return __ret;
  57352. }
  57353. #else
  57354. __ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
  57355. uint32_t __ret;
  57356. __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
  57357. return __ret;
  57358. }
  57359. #endif
  57360. #ifdef __LITTLE_ENDIAN__
  57361. __ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
  57362. uint64_t __ret;
  57363. __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
  57364. return __ret;
  57365. }
  57366. #else
  57367. __ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
  57368. uint64_t __ret;
  57369. __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
  57370. return __ret;
  57371. }
  57372. #endif
  57373. #ifdef __LITTLE_ENDIAN__
  57374. __ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
  57375. uint16_t __ret;
  57376. __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
  57377. return __ret;
  57378. }
  57379. #else
  57380. __ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
  57381. uint16_t __ret;
  57382. __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
  57383. return __ret;
  57384. }
  57385. #endif
  57386. #ifdef __LITTLE_ENDIAN__
  57387. __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
  57388. int8_t __ret;
  57389. __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
  57390. return __ret;
  57391. }
  57392. #else
  57393. __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
  57394. int8_t __ret;
  57395. __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
  57396. return __ret;
  57397. }
  57398. #endif
  57399. #ifdef __LITTLE_ENDIAN__
  57400. __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
  57401. int32_t __ret;
  57402. __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
  57403. return __ret;
  57404. }
  57405. #else
  57406. __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
  57407. int32_t __ret;
  57408. __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
  57409. return __ret;
  57410. }
  57411. #endif
  57412. #ifdef __LITTLE_ENDIAN__
  57413. __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
  57414. int64_t __ret;
  57415. __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
  57416. return __ret;
  57417. }
  57418. #else
  57419. __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
  57420. int64_t __ret;
  57421. __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
  57422. return __ret;
  57423. }
  57424. #endif
  57425. #ifdef __LITTLE_ENDIAN__
  57426. __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
  57427. int16_t __ret;
  57428. __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
  57429. return __ret;
  57430. }
  57431. #else
  57432. __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
  57433. int16_t __ret;
  57434. __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
  57435. return __ret;
  57436. }
  57437. #endif
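/*
 * The scalar vqrshl* intrinsics take a signed shift count in __p1: a
 * positive count shifts left with saturation, a negative count shifts
 * right with rounding.  For example, vqrshls_s32(5, -1) == 3 (rounded
 * 5/2) and vqrshlb_u8(200, 1) == 255 (400 clamped to the u8 range).
 */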
  57438. #ifdef __LITTLE_ENDIAN__
  57439. #define vqrshrn_high_n_u32(__p0_182, __p1_182, __p2_182) __extension__ ({ \
  57440. uint16x4_t __s0_182 = __p0_182; \
  57441. uint32x4_t __s1_182 = __p1_182; \
  57442. uint16x8_t __ret_182; \
  57443. __ret_182 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_182), (uint16x4_t)(vqrshrn_n_u32(__s1_182, __p2_182)))); \
  57444. __ret_182; \
  57445. })
  57446. #else
  57447. #define vqrshrn_high_n_u32(__p0_183, __p1_183, __p2_183) __extension__ ({ \
  57448. uint16x4_t __s0_183 = __p0_183; \
  57449. uint32x4_t __s1_183 = __p1_183; \
  57450. uint16x4_t __rev0_183; __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 3, 2, 1, 0); \
  57451. uint32x4_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \
  57452. uint16x8_t __ret_183; \
  57453. __ret_183 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_183), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_183, __p2_183)))); \
  57454. __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 7, 6, 5, 4, 3, 2, 1, 0); \
  57455. __ret_183; \
  57456. })
  57457. #endif
  57458. #ifdef __LITTLE_ENDIAN__
  57459. #define vqrshrn_high_n_u64(__p0_184, __p1_184, __p2_184) __extension__ ({ \
  57460. uint32x2_t __s0_184 = __p0_184; \
  57461. uint64x2_t __s1_184 = __p1_184; \
  57462. uint32x4_t __ret_184; \
  57463. __ret_184 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_184), (uint32x2_t)(vqrshrn_n_u64(__s1_184, __p2_184)))); \
  57464. __ret_184; \
  57465. })
  57466. #else
  57467. #define vqrshrn_high_n_u64(__p0_185, __p1_185, __p2_185) __extension__ ({ \
  57468. uint32x2_t __s0_185 = __p0_185; \
  57469. uint64x2_t __s1_185 = __p1_185; \
  57470. uint32x2_t __rev0_185; __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 1, 0); \
  57471. uint64x2_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 1, 0); \
  57472. uint32x4_t __ret_185; \
  57473. __ret_185 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_185), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_185, __p2_185)))); \
  57474. __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \
  57475. __ret_185; \
  57476. })
  57477. #endif
  57478. #ifdef __LITTLE_ENDIAN__
  57479. #define vqrshrn_high_n_u16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
  57480. uint8x8_t __s0_186 = __p0_186; \
  57481. uint16x8_t __s1_186 = __p1_186; \
  57482. uint8x16_t __ret_186; \
  57483. __ret_186 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_186), (uint8x8_t)(vqrshrn_n_u16(__s1_186, __p2_186)))); \
  57484. __ret_186; \
  57485. })
  57486. #else
  57487. #define vqrshrn_high_n_u16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
  57488. uint8x8_t __s0_187 = __p0_187; \
  57489. uint16x8_t __s1_187 = __p1_187; \
  57490. uint8x8_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 7, 6, 5, 4, 3, 2, 1, 0); \
  57491. uint16x8_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 7, 6, 5, 4, 3, 2, 1, 0); \
  57492. uint8x16_t __ret_187; \
  57493. __ret_187 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_187), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_187, __p2_187)))); \
  57494. __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  57495. __ret_187; \
  57496. })
  57497. #endif
  57498. #ifdef __LITTLE_ENDIAN__
  57499. #define vqrshrn_high_n_s32(__p0_188, __p1_188, __p2_188) __extension__ ({ \
  57500. int16x4_t __s0_188 = __p0_188; \
  57501. int32x4_t __s1_188 = __p1_188; \
  57502. int16x8_t __ret_188; \
  57503. __ret_188 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_188), (int16x4_t)(vqrshrn_n_s32(__s1_188, __p2_188)))); \
  57504. __ret_188; \
  57505. })
  57506. #else
  57507. #define vqrshrn_high_n_s32(__p0_189, __p1_189, __p2_189) __extension__ ({ \
  57508. int16x4_t __s0_189 = __p0_189; \
  57509. int32x4_t __s1_189 = __p1_189; \
  57510. int16x4_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \
  57511. int32x4_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \
  57512. int16x8_t __ret_189; \
  57513. __ret_189 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_189), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_189, __p2_189)))); \
  57514. __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 7, 6, 5, 4, 3, 2, 1, 0); \
  57515. __ret_189; \
  57516. })
  57517. #endif
  57518. #ifdef __LITTLE_ENDIAN__
  57519. #define vqrshrn_high_n_s64(__p0_190, __p1_190, __p2_190) __extension__ ({ \
  57520. int32x2_t __s0_190 = __p0_190; \
  57521. int64x2_t __s1_190 = __p1_190; \
  57522. int32x4_t __ret_190; \
  57523. __ret_190 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_190), (int32x2_t)(vqrshrn_n_s64(__s1_190, __p2_190)))); \
  57524. __ret_190; \
  57525. })
  57526. #else
  57527. #define vqrshrn_high_n_s64(__p0_191, __p1_191, __p2_191) __extension__ ({ \
  57528. int32x2_t __s0_191 = __p0_191; \
  57529. int64x2_t __s1_191 = __p1_191; \
  57530. int32x2_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \
  57531. int64x2_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \
  57532. int32x4_t __ret_191; \
  57533. __ret_191 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_191), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_191, __p2_191)))); \
  57534. __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 3, 2, 1, 0); \
  57535. __ret_191; \
  57536. })
  57537. #endif
  57538. #ifdef __LITTLE_ENDIAN__
  57539. #define vqrshrn_high_n_s16(__p0_192, __p1_192, __p2_192) __extension__ ({ \
  57540. int8x8_t __s0_192 = __p0_192; \
  57541. int16x8_t __s1_192 = __p1_192; \
  57542. int8x16_t __ret_192; \
  57543. __ret_192 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_192), (int8x8_t)(vqrshrn_n_s16(__s1_192, __p2_192)))); \
  57544. __ret_192; \
  57545. })
  57546. #else
  57547. #define vqrshrn_high_n_s16(__p0_193, __p1_193, __p2_193) __extension__ ({ \
  57548. int8x8_t __s0_193 = __p0_193; \
  57549. int16x8_t __s1_193 = __p1_193; \
  57550. int8x8_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 7, 6, 5, 4, 3, 2, 1, 0); \
  57551. int16x8_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 7, 6, 5, 4, 3, 2, 1, 0); \
  57552. int8x16_t __ret_193; \
  57553. __ret_193 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_193), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_193, __p2_193)))); \
  57554. __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  57555. __ret_193; \
  57556. })
  57557. #endif
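/*
 * The vqrshrn_high_n_* macros implement the "narrow into the high half"
 * pattern: the wide vector __p1 is narrowed with a saturating rounding
 * shift right by __p2, and the result is placed in the upper half of the
 * output while __p0 supplies the lower half (via vcombine).  __p2 must be
 * a constant in the valid immediate range for the element width.
 */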
  57558. #ifdef __LITTLE_ENDIAN__
  57559. #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
  57560. uint32_t __s0 = __p0; \
  57561. uint16_t __ret; \
  57562. __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
  57563. __ret; \
  57564. })
  57565. #else
  57566. #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
  57567. uint32_t __s0 = __p0; \
  57568. uint16_t __ret; \
  57569. __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
  57570. __ret; \
  57571. })
  57572. #endif
  57573. #ifdef __LITTLE_ENDIAN__
  57574. #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
  57575. uint64_t __s0 = __p0; \
  57576. uint32_t __ret; \
  57577. __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
  57578. __ret; \
  57579. })
  57580. #else
  57581. #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
  57582. uint64_t __s0 = __p0; \
  57583. uint32_t __ret; \
  57584. __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
  57585. __ret; \
  57586. })
  57587. #endif
  57588. #ifdef __LITTLE_ENDIAN__
  57589. #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
  57590. uint16_t __s0 = __p0; \
  57591. uint8_t __ret; \
  57592. __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
  57593. __ret; \
  57594. })
  57595. #else
  57596. #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
  57597. uint16_t __s0 = __p0; \
  57598. uint8_t __ret; \
  57599. __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
  57600. __ret; \
  57601. })
  57602. #endif
  57603. #ifdef __LITTLE_ENDIAN__
  57604. #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
  57605. int32_t __s0 = __p0; \
  57606. int16_t __ret; \
  57607. __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
  57608. __ret; \
  57609. })
  57610. #else
  57611. #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
  57612. int32_t __s0 = __p0; \
  57613. int16_t __ret; \
  57614. __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
  57615. __ret; \
  57616. })
  57617. #endif
  57618. #ifdef __LITTLE_ENDIAN__
  57619. #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
  57620. int64_t __s0 = __p0; \
  57621. int32_t __ret; \
  57622. __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
  57623. __ret; \
  57624. })
  57625. #else
  57626. #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
  57627. int64_t __s0 = __p0; \
  57628. int32_t __ret; \
  57629. __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
  57630. __ret; \
  57631. })
  57632. #endif
  57633. #ifdef __LITTLE_ENDIAN__
  57634. #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
  57635. int16_t __s0 = __p0; \
  57636. int8_t __ret; \
  57637. __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
  57638. __ret; \
  57639. })
  57640. #else
  57641. #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
  57642. int16_t __s0 = __p0; \
  57643. int8_t __ret; \
  57644. __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
  57645. __ret; \
  57646. })
  57647. #endif
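/*
 * The scalar vqrshrn*_n_* forms shift right by the constant __p1 with
 * rounding, then saturate into the next narrower type.  For example,
 * vqrshrnd_n_s64(1000, 3) == 125, since (1000 + 4) >> 3 == 125 and the
 * value fits in int32_t.
 */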
  57648. #ifdef __LITTLE_ENDIAN__
  57649. #define vqrshrun_high_n_s32(__p0_194, __p1_194, __p2_194) __extension__ ({ \
  57650. int16x4_t __s0_194 = __p0_194; \
  57651. int32x4_t __s1_194 = __p1_194; \
  57652. int16x8_t __ret_194; \
  57653. __ret_194 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_194), (int16x4_t)(vqrshrun_n_s32(__s1_194, __p2_194)))); \
  57654. __ret_194; \
  57655. })
  57656. #else
  57657. #define vqrshrun_high_n_s32(__p0_195, __p1_195, __p2_195) __extension__ ({ \
  57658. int16x4_t __s0_195 = __p0_195; \
  57659. int32x4_t __s1_195 = __p1_195; \
  57660. int16x4_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 3, 2, 1, 0); \
  57661. int32x4_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 3, 2, 1, 0); \
  57662. int16x8_t __ret_195; \
  57663. __ret_195 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_195), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_195, __p2_195)))); \
  57664. __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 7, 6, 5, 4, 3, 2, 1, 0); \
  57665. __ret_195; \
  57666. })
  57667. #endif
  57668. #ifdef __LITTLE_ENDIAN__
  57669. #define vqrshrun_high_n_s64(__p0_196, __p1_196, __p2_196) __extension__ ({ \
  57670. int32x2_t __s0_196 = __p0_196; \
  57671. int64x2_t __s1_196 = __p1_196; \
  57672. int32x4_t __ret_196; \
  57673. __ret_196 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_196), (int32x2_t)(vqrshrun_n_s64(__s1_196, __p2_196)))); \
  57674. __ret_196; \
  57675. })
  57676. #else
  57677. #define vqrshrun_high_n_s64(__p0_197, __p1_197, __p2_197) __extension__ ({ \
  57678. int32x2_t __s0_197 = __p0_197; \
  57679. int64x2_t __s1_197 = __p1_197; \
  57680. int32x2_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 1, 0); \
  57681. int64x2_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 1, 0); \
  57682. int32x4_t __ret_197; \
  57683. __ret_197 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_197), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_197, __p2_197)))); \
  57684. __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \
  57685. __ret_197; \
  57686. })
  57687. #endif
  57688. #ifdef __LITTLE_ENDIAN__
  57689. #define vqrshrun_high_n_s16(__p0_198, __p1_198, __p2_198) __extension__ ({ \
  57690. int8x8_t __s0_198 = __p0_198; \
  57691. int16x8_t __s1_198 = __p1_198; \
  57692. int8x16_t __ret_198; \
  57693. __ret_198 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_198), (int8x8_t)(vqrshrun_n_s16(__s1_198, __p2_198)))); \
  57694. __ret_198; \
  57695. })
  57696. #else
  57697. #define vqrshrun_high_n_s16(__p0_199, __p1_199, __p2_199) __extension__ ({ \
  57698. int8x8_t __s0_199 = __p0_199; \
  57699. int16x8_t __s1_199 = __p1_199; \
  57700. int8x8_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 7, 6, 5, 4, 3, 2, 1, 0); \
  57701. int16x8_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 7, 6, 5, 4, 3, 2, 1, 0); \
  57702. int8x16_t __ret_199; \
  57703. __ret_199 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_199), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_199, __p2_199)))); \
  57704. __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  57705. __ret_199; \
  57706. })
  57707. #endif
  57708. #ifdef __LITTLE_ENDIAN__
  57709. #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
  57710. int32_t __s0 = __p0; \
  57711. int16_t __ret; \
  57712. __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
  57713. __ret; \
  57714. })
  57715. #else
  57716. #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
  57717. int32_t __s0 = __p0; \
  57718. int16_t __ret; \
  57719. __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
  57720. __ret; \
  57721. })
  57722. #endif
  57723. #ifdef __LITTLE_ENDIAN__
  57724. #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
  57725. int64_t __s0 = __p0; \
  57726. int32_t __ret; \
  57727. __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
  57728. __ret; \
  57729. })
  57730. #else
  57731. #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
  57732. int64_t __s0 = __p0; \
  57733. int32_t __ret; \
  57734. __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
  57735. __ret; \
  57736. })
  57737. #endif
  57738. #ifdef __LITTLE_ENDIAN__
  57739. #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
  57740. int16_t __s0 = __p0; \
  57741. int8_t __ret; \
  57742. __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
  57743. __ret; \
  57744. })
  57745. #else
  57746. #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
  57747. int16_t __s0 = __p0; \
  57748. int8_t __ret; \
  57749. __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
  57750. __ret; \
  57751. })
  57752. #endif
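/*
 * The "un" variants (vqrshrun*) take a signed input but saturate into the
 * unsigned range of the narrower type, so negative inputs clamp to 0:
 * vqrshrund_n_s64(-5, 1) == 0, while vqrshrunh_n_s16(300, 2) == 75.
 */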
  57753. #ifdef __LITTLE_ENDIAN__
  57754. __ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
  57755. uint8_t __ret;
  57756. __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
  57757. return __ret;
  57758. }
  57759. #else
  57760. __ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
  57761. uint8_t __ret;
  57762. __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
  57763. return __ret;
  57764. }
  57765. #endif
  57766. #ifdef __LITTLE_ENDIAN__
  57767. __ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
  57768. uint32_t __ret;
  57769. __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
  57770. return __ret;
  57771. }
  57772. #else
  57773. __ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
  57774. uint32_t __ret;
  57775. __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
  57776. return __ret;
  57777. }
  57778. #endif
  57779. #ifdef __LITTLE_ENDIAN__
  57780. __ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
  57781. uint64_t __ret;
  57782. __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
  57783. return __ret;
  57784. }
  57785. #else
  57786. __ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
  57787. uint64_t __ret;
  57788. __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
  57789. return __ret;
  57790. }
  57791. #endif
  57792. #ifdef __LITTLE_ENDIAN__
  57793. __ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
  57794. uint16_t __ret;
  57795. __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
  57796. return __ret;
  57797. }
  57798. #else
  57799. __ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
  57800. uint16_t __ret;
  57801. __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
  57802. return __ret;
  57803. }
  57804. #endif
  57805. #ifdef __LITTLE_ENDIAN__
  57806. __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
  57807. int8_t __ret;
  57808. __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
  57809. return __ret;
  57810. }
  57811. #else
  57812. __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
  57813. int8_t __ret;
  57814. __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
  57815. return __ret;
  57816. }
  57817. #endif
  57818. #ifdef __LITTLE_ENDIAN__
  57819. __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
  57820. int32_t __ret;
  57821. __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
  57822. return __ret;
  57823. }
  57824. #else
  57825. __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
  57826. int32_t __ret;
  57827. __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
  57828. return __ret;
  57829. }
  57830. #endif
  57831. #ifdef __LITTLE_ENDIAN__
  57832. __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
  57833. int64_t __ret;
  57834. __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
  57835. return __ret;
  57836. }
  57837. #else
  57838. __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
  57839. int64_t __ret;
  57840. __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
  57841. return __ret;
  57842. }
  57843. #endif
  57844. #ifdef __LITTLE_ENDIAN__
  57845. __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
  57846. int16_t __ret;
  57847. __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
  57848. return __ret;
  57849. }
  57850. #else
  57851. __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
  57852. int16_t __ret;
  57853. __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
  57854. return __ret;
  57855. }
  57856. #endif
  57857. #ifdef __LITTLE_ENDIAN__
  57858. #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
  57859. uint8_t __s0 = __p0; \
  57860. uint8_t __ret; \
  57861. __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
  57862. __ret; \
  57863. })
  57864. #else
  57865. #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
  57866. uint8_t __s0 = __p0; \
  57867. uint8_t __ret; \
  57868. __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
  57869. __ret; \
  57870. })
  57871. #endif
  57872. #ifdef __LITTLE_ENDIAN__
  57873. #define vqshls_n_u32(__p0, __p1) __extension__ ({ \
  57874. uint32_t __s0 = __p0; \
  57875. uint32_t __ret; \
  57876. __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
  57877. __ret; \
  57878. })
  57879. #else
  57880. #define vqshls_n_u32(__p0, __p1) __extension__ ({ \
  57881. uint32_t __s0 = __p0; \
  57882. uint32_t __ret; \
  57883. __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
  57884. __ret; \
  57885. })
  57886. #endif
  57887. #ifdef __LITTLE_ENDIAN__
  57888. #define vqshld_n_u64(__p0, __p1) __extension__ ({ \
  57889. uint64_t __s0 = __p0; \
  57890. uint64_t __ret; \
  57891. __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
  57892. __ret; \
  57893. })
  57894. #else
  57895. #define vqshld_n_u64(__p0, __p1) __extension__ ({ \
  57896. uint64_t __s0 = __p0; \
  57897. uint64_t __ret; \
  57898. __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
  57899. __ret; \
  57900. })
  57901. #endif
  57902. #ifdef __LITTLE_ENDIAN__
  57903. #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
  57904. uint16_t __s0 = __p0; \
  57905. uint16_t __ret; \
  57906. __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
  57907. __ret; \
  57908. })
  57909. #else
  57910. #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
  57911. uint16_t __s0 = __p0; \
  57912. uint16_t __ret; \
  57913. __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
  57914. __ret; \
  57915. })
  57916. #endif
  57917. #ifdef __LITTLE_ENDIAN__
  57918. #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
  57919. int8_t __s0 = __p0; \
  57920. int8_t __ret; \
  57921. __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
  57922. __ret; \
  57923. })
  57924. #else
  57925. #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
  57926. int8_t __s0 = __p0; \
  57927. int8_t __ret; \
  57928. __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
  57929. __ret; \
  57930. })
  57931. #endif
  57932. #ifdef __LITTLE_ENDIAN__
  57933. #define vqshls_n_s32(__p0, __p1) __extension__ ({ \
  57934. int32_t __s0 = __p0; \
  57935. int32_t __ret; \
  57936. __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
  57937. __ret; \
  57938. })
  57939. #else
  57940. #define vqshls_n_s32(__p0, __p1) __extension__ ({ \
  57941. int32_t __s0 = __p0; \
  57942. int32_t __ret; \
  57943. __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
  57944. __ret; \
  57945. })
  57946. #endif
  57947. #ifdef __LITTLE_ENDIAN__
  57948. #define vqshld_n_s64(__p0, __p1) __extension__ ({ \
  57949. int64_t __s0 = __p0; \
  57950. int64_t __ret; \
  57951. __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
  57952. __ret; \
  57953. })
  57954. #else
  57955. #define vqshld_n_s64(__p0, __p1) __extension__ ({ \
  57956. int64_t __s0 = __p0; \
  57957. int64_t __ret; \
  57958. __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
  57959. __ret; \
  57960. })
  57961. #endif
  57962. #ifdef __LITTLE_ENDIAN__
  57963. #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
  57964. int16_t __s0 = __p0; \
  57965. int16_t __ret; \
  57966. __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
  57967. __ret; \
  57968. })
  57969. #else
  57970. #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
  57971. int16_t __s0 = __p0; \
  57972. int16_t __ret; \
  57973. __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
  57974. __ret; \
  57975. })
  57976. #endif
  57977. #ifdef __LITTLE_ENDIAN__
  57978. #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
  57979. int8_t __s0 = __p0; \
  57980. int8_t __ret; \
  57981. __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
  57982. __ret; \
  57983. })
  57984. #else
  57985. #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
  57986. int8_t __s0 = __p0; \
  57987. int8_t __ret; \
  57988. __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
  57989. __ret; \
  57990. })
  57991. #endif
  57992. #ifdef __LITTLE_ENDIAN__
  57993. #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
  57994. int32_t __s0 = __p0; \
  57995. int32_t __ret; \
  57996. __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
  57997. __ret; \
  57998. })
  57999. #else
  58000. #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
  58001. int32_t __s0 = __p0; \
  58002. int32_t __ret; \
  58003. __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
  58004. __ret; \
  58005. })
  58006. #endif
  58007. #ifdef __LITTLE_ENDIAN__
  58008. #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
  58009. int64_t __s0 = __p0; \
  58010. int64_t __ret; \
  58011. __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
  58012. __ret; \
  58013. })
  58014. #else
  58015. #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
  58016. int64_t __s0 = __p0; \
  58017. int64_t __ret; \
  58018. __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
  58019. __ret; \
  58020. })
  58021. #endif
  58022. #ifdef __LITTLE_ENDIAN__
  58023. #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
  58024. int16_t __s0 = __p0; \
  58025. int16_t __ret; \
  58026. __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
  58027. __ret; \
  58028. })
  58029. #else
  58030. #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
  58031. int16_t __s0 = __p0; \
  58032. int16_t __ret; \
  58033. __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
  58034. __ret; \
  58035. })
  58036. #endif
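/*
 * vqshlub_n_s8 / vqshlus_n_s32 / vqshlud_n_s64 / vqshluh_n_s16 shift a
 * signed scalar left by the constant __p1 and saturate into the unsigned
 * range of the same width: negative inputs become 0 and overflow clamps
 * to the unsigned maximum, e.g. vqshlub_n_s8(-3, 2) == 0 and
 * vqshlub_n_s8(100, 1) == 200.
 */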
  58037. #ifdef __LITTLE_ENDIAN__
  58038. #define vqshrn_high_n_u32(__p0_200, __p1_200, __p2_200) __extension__ ({ \
  58039. uint16x4_t __s0_200 = __p0_200; \
  58040. uint32x4_t __s1_200 = __p1_200; \
  58041. uint16x8_t __ret_200; \
  58042. __ret_200 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_200), (uint16x4_t)(vqshrn_n_u32(__s1_200, __p2_200)))); \
  58043. __ret_200; \
  58044. })
  58045. #else
  58046. #define vqshrn_high_n_u32(__p0_201, __p1_201, __p2_201) __extension__ ({ \
  58047. uint16x4_t __s0_201 = __p0_201; \
  58048. uint32x4_t __s1_201 = __p1_201; \
  58049. uint16x4_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \
  58050. uint32x4_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \
  58051. uint16x8_t __ret_201; \
  58052. __ret_201 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_201), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_201, __p2_201)))); \
  58053. __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 7, 6, 5, 4, 3, 2, 1, 0); \
  58054. __ret_201; \
  58055. })
  58056. #endif
  58057. #ifdef __LITTLE_ENDIAN__
  58058. #define vqshrn_high_n_u64(__p0_202, __p1_202, __p2_202) __extension__ ({ \
  58059. uint32x2_t __s0_202 = __p0_202; \
  58060. uint64x2_t __s1_202 = __p1_202; \
  58061. uint32x4_t __ret_202; \
  58062. __ret_202 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_202), (uint32x2_t)(vqshrn_n_u64(__s1_202, __p2_202)))); \
  58063. __ret_202; \
  58064. })
  58065. #else
  58066. #define vqshrn_high_n_u64(__p0_203, __p1_203, __p2_203) __extension__ ({ \
  58067. uint32x2_t __s0_203 = __p0_203; \
  58068. uint64x2_t __s1_203 = __p1_203; \
  58069. uint32x2_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \
  58070. uint64x2_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \
  58071. uint32x4_t __ret_203; \
  58072. __ret_203 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_203), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_203, __p2_203)))); \
  58073. __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 3, 2, 1, 0); \
  58074. __ret_203; \
  58075. })
  58076. #endif
  58077. #ifdef __LITTLE_ENDIAN__
  58078. #define vqshrn_high_n_u16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
  58079. uint8x8_t __s0_204 = __p0_204; \
  58080. uint16x8_t __s1_204 = __p1_204; \
  58081. uint8x16_t __ret_204; \
  58082. __ret_204 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_204), (uint8x8_t)(vqshrn_n_u16(__s1_204, __p2_204)))); \
  58083. __ret_204; \
  58084. })
  58085. #else
  58086. #define vqshrn_high_n_u16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
  58087. uint8x8_t __s0_205 = __p0_205; \
  58088. uint16x8_t __s1_205 = __p1_205; \
  58089. uint8x8_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 7, 6, 5, 4, 3, 2, 1, 0); \
  58090. uint16x8_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 7, 6, 5, 4, 3, 2, 1, 0); \
  58091. uint8x16_t __ret_205; \
  58092. __ret_205 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_205), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_205, __p2_205)))); \
  58093. __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  58094. __ret_205; \
  58095. })
  58096. #endif
  58097. #ifdef __LITTLE_ENDIAN__
  58098. #define vqshrn_high_n_s32(__p0_206, __p1_206, __p2_206) __extension__ ({ \
  58099. int16x4_t __s0_206 = __p0_206; \
  58100. int32x4_t __s1_206 = __p1_206; \
  58101. int16x8_t __ret_206; \
  58102. __ret_206 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_206), (int16x4_t)(vqshrn_n_s32(__s1_206, __p2_206)))); \
  58103. __ret_206; \
  58104. })
  58105. #else
  58106. #define vqshrn_high_n_s32(__p0_207, __p1_207, __p2_207) __extension__ ({ \
  58107. int16x4_t __s0_207 = __p0_207; \
  58108. int32x4_t __s1_207 = __p1_207; \
  58109. int16x4_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 3, 2, 1, 0); \
  58110. int32x4_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 3, 2, 1, 0); \
  58111. int16x8_t __ret_207; \
  58112. __ret_207 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_207), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_207, __p2_207)))); \
  58113. __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 7, 6, 5, 4, 3, 2, 1, 0); \
  58114. __ret_207; \
  58115. })
  58116. #endif
  58117. #ifdef __LITTLE_ENDIAN__
  58118. #define vqshrn_high_n_s64(__p0_208, __p1_208, __p2_208) __extension__ ({ \
  58119. int32x2_t __s0_208 = __p0_208; \
  58120. int64x2_t __s1_208 = __p1_208; \
  58121. int32x4_t __ret_208; \
  58122. __ret_208 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_208), (int32x2_t)(vqshrn_n_s64(__s1_208, __p2_208)))); \
  58123. __ret_208; \
  58124. })
  58125. #else
  58126. #define vqshrn_high_n_s64(__p0_209, __p1_209, __p2_209) __extension__ ({ \
  58127. int32x2_t __s0_209 = __p0_209; \
  58128. int64x2_t __s1_209 = __p1_209; \
  58129. int32x2_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 1, 0); \
  58130. int64x2_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 1, 0); \
  58131. int32x4_t __ret_209; \
  58132. __ret_209 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_209), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_209, __p2_209)))); \
  58133. __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \
  58134. __ret_209; \
  58135. })
  58136. #endif
  58137. #ifdef __LITTLE_ENDIAN__
  58138. #define vqshrn_high_n_s16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
  58139. int8x8_t __s0_210 = __p0_210; \
  58140. int16x8_t __s1_210 = __p1_210; \
  58141. int8x16_t __ret_210; \
  58142. __ret_210 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_210), (int8x8_t)(vqshrn_n_s16(__s1_210, __p2_210)))); \
  58143. __ret_210; \
  58144. })
  58145. #else
  58146. #define vqshrn_high_n_s16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
  58147. int8x8_t __s0_211 = __p0_211; \
  58148. int16x8_t __s1_211 = __p1_211; \
  58149. int8x8_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 7, 6, 5, 4, 3, 2, 1, 0); \
  58150. int16x8_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 7, 6, 5, 4, 3, 2, 1, 0); \
  58151. int8x16_t __ret_211; \
  58152. __ret_211 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_211), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_211, __p2_211)))); \
  58153. __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  58154. __ret_211; \
  58155. })
  58156. #endif
  58157. #ifdef __LITTLE_ENDIAN__
  58158. #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
  58159. uint32_t __s0 = __p0; \
  58160. uint16_t __ret; \
  58161. __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
  58162. __ret; \
  58163. })
  58164. #else
  58165. #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
  58166. uint32_t __s0 = __p0; \
  58167. uint16_t __ret; \
  58168. __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
  58169. __ret; \
  58170. })
  58171. #endif
  58172. #ifdef __LITTLE_ENDIAN__
  58173. #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
  58174. uint64_t __s0 = __p0; \
  58175. uint32_t __ret; \
  58176. __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
  58177. __ret; \
  58178. })
  58179. #else
  58180. #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
  58181. uint64_t __s0 = __p0; \
  58182. uint32_t __ret; \
  58183. __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
  58184. __ret; \
  58185. })
  58186. #endif
  58187. #ifdef __LITTLE_ENDIAN__
  58188. #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
  58189. uint16_t __s0 = __p0; \
  58190. uint8_t __ret; \
  58191. __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
  58192. __ret; \
  58193. })
  58194. #else
  58195. #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
  58196. uint16_t __s0 = __p0; \
  58197. uint8_t __ret; \
  58198. __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
  58199. __ret; \
  58200. })
  58201. #endif
  58202. #ifdef __LITTLE_ENDIAN__
  58203. #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
  58204. int32_t __s0 = __p0; \
  58205. int16_t __ret; \
  58206. __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
  58207. __ret; \
  58208. })
  58209. #else
  58210. #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
  58211. int32_t __s0 = __p0; \
  58212. int16_t __ret; \
  58213. __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
  58214. __ret; \
  58215. })
  58216. #endif
  58217. #ifdef __LITTLE_ENDIAN__
  58218. #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
  58219. int64_t __s0 = __p0; \
  58220. int32_t __ret; \
  58221. __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
  58222. __ret; \
  58223. })
  58224. #else
  58225. #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
  58226. int64_t __s0 = __p0; \
  58227. int32_t __ret; \
  58228. __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
  58229. __ret; \
  58230. })
  58231. #endif
  58232. #ifdef __LITTLE_ENDIAN__
  58233. #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
  58234. int16_t __s0 = __p0; \
  58235. int8_t __ret; \
  58236. __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
  58237. __ret; \
  58238. })
  58239. #else
  58240. #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
  58241. int16_t __s0 = __p0; \
  58242. int8_t __ret; \
  58243. __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
  58244. __ret; \
  58245. })
  58246. #endif
  58247. #ifdef __LITTLE_ENDIAN__
  58248. #define vqshrun_high_n_s32(__p0_212, __p1_212, __p2_212) __extension__ ({ \
  58249. int16x4_t __s0_212 = __p0_212; \
  58250. int32x4_t __s1_212 = __p1_212; \
  58251. int16x8_t __ret_212; \
  58252. __ret_212 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_212), (int16x4_t)(vqshrun_n_s32(__s1_212, __p2_212)))); \
  58253. __ret_212; \
  58254. })
  58255. #else
  58256. #define vqshrun_high_n_s32(__p0_213, __p1_213, __p2_213) __extension__ ({ \
  58257. int16x4_t __s0_213 = __p0_213; \
  58258. int32x4_t __s1_213 = __p1_213; \
  58259. int16x4_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \
  58260. int32x4_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \
  58261. int16x8_t __ret_213; \
  58262. __ret_213 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_213), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_213, __p2_213)))); \
  58263. __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 7, 6, 5, 4, 3, 2, 1, 0); \
  58264. __ret_213; \
  58265. })
  58266. #endif
  58267. #ifdef __LITTLE_ENDIAN__
  58268. #define vqshrun_high_n_s64(__p0_214, __p1_214, __p2_214) __extension__ ({ \
  58269. int32x2_t __s0_214 = __p0_214; \
  58270. int64x2_t __s1_214 = __p1_214; \
  58271. int32x4_t __ret_214; \
  58272. __ret_214 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_214), (int32x2_t)(vqshrun_n_s64(__s1_214, __p2_214)))); \
  58273. __ret_214; \
  58274. })
  58275. #else
  58276. #define vqshrun_high_n_s64(__p0_215, __p1_215, __p2_215) __extension__ ({ \
  58277. int32x2_t __s0_215 = __p0_215; \
  58278. int64x2_t __s1_215 = __p1_215; \
  58279. int32x2_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 1, 0); \
  58280. int64x2_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \
  58281. int32x4_t __ret_215; \
  58282. __ret_215 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_215), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_215, __p2_215)))); \
  58283. __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \
  58284. __ret_215; \
  58285. })
  58286. #endif
  58287. #ifdef __LITTLE_ENDIAN__
  58288. #define vqshrun_high_n_s16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
  58289. int8x8_t __s0_216 = __p0_216; \
  58290. int16x8_t __s1_216 = __p1_216; \
  58291. int8x16_t __ret_216; \
  58292. __ret_216 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_216), (int8x8_t)(vqshrun_n_s16(__s1_216, __p2_216)))); \
  58293. __ret_216; \
  58294. })
  58295. #else
  58296. #define vqshrun_high_n_s16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
  58297. int8x8_t __s0_217 = __p0_217; \
  58298. int16x8_t __s1_217 = __p1_217; \
  58299. int8x8_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \
  58300. int16x8_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 7, 6, 5, 4, 3, 2, 1, 0); \
  58301. int8x16_t __ret_217; \
  58302. __ret_217 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_217), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_217, __p2_217)))); \
  58303. __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  58304. __ret_217; \
  58305. })
  58306. #endif
  58307. #ifdef __LITTLE_ENDIAN__
  58308. #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
  58309. int32_t __s0 = __p0; \
  58310. int16_t __ret; \
  58311. __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
  58312. __ret; \
  58313. })
  58314. #else
  58315. #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
  58316. int32_t __s0 = __p0; \
  58317. int16_t __ret; \
  58318. __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
  58319. __ret; \
  58320. })
  58321. #endif
  58322. #ifdef __LITTLE_ENDIAN__
  58323. #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
  58324. int64_t __s0 = __p0; \
  58325. int32_t __ret; \
  58326. __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
  58327. __ret; \
  58328. })
  58329. #else
  58330. #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
  58331. int64_t __s0 = __p0; \
  58332. int32_t __ret; \
  58333. __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
  58334. __ret; \
  58335. })
  58336. #endif
  58337. #ifdef __LITTLE_ENDIAN__
  58338. #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
  58339. int16_t __s0 = __p0; \
  58340. int8_t __ret; \
  58341. __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
  58342. __ret; \
  58343. })
  58344. #else
  58345. #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
  58346. int16_t __s0 = __p0; \
  58347. int8_t __ret; \
  58348. __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
  58349. __ret; \
  58350. })
  58351. #endif
  58352. #ifdef __LITTLE_ENDIAN__
  58353. __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
  58354. uint8_t __ret;
  58355. __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
  58356. return __ret;
  58357. }
  58358. #else
  58359. __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
  58360. uint8_t __ret;
  58361. __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
  58362. return __ret;
  58363. }
  58364. #endif
  58365. #ifdef __LITTLE_ENDIAN__
  58366. __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
  58367. uint32_t __ret;
  58368. __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
  58369. return __ret;
  58370. }
  58371. #else
  58372. __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
  58373. uint32_t __ret;
  58374. __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
  58375. return __ret;
  58376. }
  58377. #endif
  58378. #ifdef __LITTLE_ENDIAN__
  58379. __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
  58380. uint64_t __ret;
  58381. __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
  58382. return __ret;
  58383. }
  58384. #else
  58385. __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
  58386. uint64_t __ret;
  58387. __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
  58388. return __ret;
  58389. }
  58390. #endif
  58391. #ifdef __LITTLE_ENDIAN__
  58392. __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
  58393. uint16_t __ret;
  58394. __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
  58395. return __ret;
  58396. }
  58397. #else
  58398. __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
  58399. uint16_t __ret;
  58400. __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
  58401. return __ret;
  58402. }
  58403. #endif
  58404. #ifdef __LITTLE_ENDIAN__
  58405. __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
  58406. int8_t __ret;
  58407. __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
  58408. return __ret;
  58409. }
  58410. #else
  58411. __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
  58412. int8_t __ret;
  58413. __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
  58414. return __ret;
  58415. }
  58416. #endif
  58417. #ifdef __LITTLE_ENDIAN__
  58418. __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
  58419. int32_t __ret;
  58420. __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
  58421. return __ret;
  58422. }
  58423. #else
  58424. __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
  58425. int32_t __ret;
  58426. __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
  58427. return __ret;
  58428. }
  58429. __ai int32_t __noswap_vqsubs_s32(int32_t __p0, int32_t __p1) {
  58430. int32_t __ret;
  58431. __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
  58432. return __ret;
  58433. }
  58434. #endif
  58435. #ifdef __LITTLE_ENDIAN__
  58436. __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
  58437. int64_t __ret;
  58438. __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
  58439. return __ret;
  58440. }
  58441. #else
  58442. __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
  58443. int64_t __ret;
  58444. __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
  58445. return __ret;
  58446. }
  58447. #endif
  58448. #ifdef __LITTLE_ENDIAN__
  58449. __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
  58450. int16_t __ret;
  58451. __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
  58452. return __ret;
  58453. }
  58454. #else
  58455. __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
  58456. int16_t __ret;
  58457. __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
  58458. return __ret;
  58459. }
  58460. __ai int16_t __noswap_vqsubh_s16(int16_t __p0, int16_t __p1) {
  58461. int16_t __ret;
  58462. __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
  58463. return __ret;
  58464. }
  58465. #endif
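/*
 * The __noswap_vqsubs_s32 / __noswap_vqsubh_s16 aliases (like the
 * __noswap_vqrdmulh* ones earlier) are byte-for-byte copies of the public
 * functions; they appear to exist only so that big-endian expansions of
 * vector and lane intrinsics can call a variant that performs no lane
 * reversal of its own.
 */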
  58466. #ifdef __LITTLE_ENDIAN__
  58467. __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
  58468. poly8x8_t __ret;
  58469. __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
  58470. return __ret;
  58471. }
  58472. #else
  58473. __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
  58474. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58475. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58476. poly8x8_t __ret;
  58477. __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
  58478. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58479. return __ret;
  58480. }
  58481. #endif
  58482. #ifdef __LITTLE_ENDIAN__
  58483. __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
  58484. poly8x16_t __ret;
  58485. __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
  58486. return __ret;
  58487. }
  58488. #else
  58489. __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
  58490. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58491. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58492. poly8x16_t __ret;
  58493. __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
  58494. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58495. return __ret;
  58496. }
  58497. #endif
  58498. #ifdef __LITTLE_ENDIAN__
  58499. __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  58500. uint8x16_t __ret;
  58501. __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  58502. return __ret;
  58503. }
  58504. #else
  58505. __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  58506. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58507. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58508. uint8x16_t __ret;
  58509. __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  58510. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58511. return __ret;
  58512. }
  58513. #endif
  58514. #ifdef __LITTLE_ENDIAN__
  58515. __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
  58516. int8x16_t __ret;
  58517. __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  58518. return __ret;
  58519. }
  58520. #else
  58521. __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
  58522. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58523. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58524. int8x16_t __ret;
  58525. __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  58526. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58527. return __ret;
  58528. }
  58529. #endif
  58530. #ifdef __LITTLE_ENDIAN__
  58531. __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
  58532. uint8x8_t __ret;
  58533. __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
  58534. return __ret;
  58535. }
  58536. #else
  58537. __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
  58538. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58539. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58540. uint8x8_t __ret;
  58541. __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
  58542. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58543. return __ret;
  58544. }
  58545. #endif
  58546. #ifdef __LITTLE_ENDIAN__
  58547. __ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
  58548. int8x8_t __ret;
  58549. __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
  58550. return __ret;
  58551. }
  58552. #else
  58553. __ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
  58554. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58555. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58556. int8x8_t __ret;
  58557. __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
  58558. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58559. return __ret;
  58560. }
  58561. #endif
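/* vqtbl2: table lookup across a pair of 16-byte table registers (32 table bytes);
 * an index >= 32 yields 0. */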
  58562. #ifdef __LITTLE_ENDIAN__
  58563. __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
  58564. poly8x8_t __ret;
  58565. __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
  58566. return __ret;
  58567. }
  58568. #else
  58569. __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
  58570. poly8x16x2_t __rev0;
  58571. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58572. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58573. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58574. poly8x8_t __ret;
  58575. __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
  58576. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58577. return __ret;
  58578. }
  58579. #endif
  58580. #ifdef __LITTLE_ENDIAN__
  58581. __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
  58582. poly8x16_t __ret;
  58583. __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
  58584. return __ret;
  58585. }
  58586. #else
  58587. __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
  58588. poly8x16x2_t __rev0;
  58589. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58590. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58591. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58592. poly8x16_t __ret;
  58593. __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
  58594. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58595. return __ret;
  58596. }
  58597. #endif
  58598. #ifdef __LITTLE_ENDIAN__
  58599. __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
  58600. uint8x16_t __ret;
  58601. __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
  58602. return __ret;
  58603. }
  58604. #else
  58605. __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
  58606. uint8x16x2_t __rev0;
  58607. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58608. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58609. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58610. uint8x16_t __ret;
  58611. __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
  58612. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58613. return __ret;
  58614. }
  58615. #endif
  58616. #ifdef __LITTLE_ENDIAN__
  58617. __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
  58618. int8x16_t __ret;
  58619. __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
  58620. return __ret;
  58621. }
  58622. #else
  58623. __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
  58624. int8x16x2_t __rev0;
  58625. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58626. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58627. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58628. int8x16_t __ret;
  58629. __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
  58630. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58631. return __ret;
  58632. }
  58633. #endif
  58634. #ifdef __LITTLE_ENDIAN__
  58635. __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
  58636. uint8x8_t __ret;
  58637. __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
  58638. return __ret;
  58639. }
  58640. #else
  58641. __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
  58642. uint8x16x2_t __rev0;
  58643. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58644. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58645. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58646. uint8x8_t __ret;
  58647. __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
  58648. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58649. return __ret;
  58650. }
  58651. #endif
  58652. #ifdef __LITTLE_ENDIAN__
  58653. __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
  58654. int8x8_t __ret;
  58655. __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
  58656. return __ret;
  58657. }
  58658. #else
  58659. __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
  58660. int8x16x2_t __rev0;
  58661. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58662. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58663. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58664. int8x8_t __ret;
  58665. __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
  58666. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58667. return __ret;
  58668. }
  58669. #endif
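/* vqtbl3: table lookup across three 16-byte table registers (48 table bytes);
 * an index >= 48 yields 0. */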
  58670. #ifdef __LITTLE_ENDIAN__
  58671. __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
  58672. poly8x8_t __ret;
  58673. __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
  58674. return __ret;
  58675. }
  58676. #else
  58677. __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
  58678. poly8x16x3_t __rev0;
  58679. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58680. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58681. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58682. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58683. poly8x8_t __ret;
  58684. __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
  58685. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58686. return __ret;
  58687. }
  58688. #endif
  58689. #ifdef __LITTLE_ENDIAN__
  58690. __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
  58691. poly8x16_t __ret;
  58692. __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
  58693. return __ret;
  58694. }
  58695. #else
  58696. __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
  58697. poly8x16x3_t __rev0;
  58698. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58699. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58700. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58701. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58702. poly8x16_t __ret;
  58703. __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
  58704. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58705. return __ret;
  58706. }
  58707. #endif
  58708. #ifdef __LITTLE_ENDIAN__
  58709. __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
  58710. uint8x16_t __ret;
  58711. __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
  58712. return __ret;
  58713. }
  58714. #else
  58715. __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
  58716. uint8x16x3_t __rev0;
  58717. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58718. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58719. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58720. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58721. uint8x16_t __ret;
  58722. __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
  58723. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58724. return __ret;
  58725. }
  58726. #endif
  58727. #ifdef __LITTLE_ENDIAN__
  58728. __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
  58729. int8x16_t __ret;
  58730. __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
  58731. return __ret;
  58732. }
  58733. #else
  58734. __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
  58735. int8x16x3_t __rev0;
  58736. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58737. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58738. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58739. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58740. int8x16_t __ret;
  58741. __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
  58742. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58743. return __ret;
  58744. }
  58745. #endif
  58746. #ifdef __LITTLE_ENDIAN__
  58747. __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
  58748. uint8x8_t __ret;
  58749. __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
  58750. return __ret;
  58751. }
  58752. #else
  58753. __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
  58754. uint8x16x3_t __rev0;
  58755. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58756. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58757. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58758. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58759. uint8x8_t __ret;
  58760. __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
  58761. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58762. return __ret;
  58763. }
  58764. #endif
  58765. #ifdef __LITTLE_ENDIAN__
  58766. __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
  58767. int8x8_t __ret;
  58768. __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
  58769. return __ret;
  58770. }
  58771. #else
  58772. __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
  58773. int8x16x3_t __rev0;
  58774. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58775. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58776. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58777. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58778. int8x8_t __ret;
  58779. __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
  58780. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58781. return __ret;
  58782. }
  58783. #endif
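/* vqtbl4: table lookup across four 16-byte table registers (64 table bytes);
 * an index >= 64 yields 0. */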
  58784. #ifdef __LITTLE_ENDIAN__
  58785. __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
  58786. poly8x8_t __ret;
  58787. __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
  58788. return __ret;
  58789. }
  58790. #else
  58791. __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
  58792. poly8x16x4_t __rev0;
  58793. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58794. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58795. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58796. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58797. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58798. poly8x8_t __ret;
  58799. __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
  58800. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58801. return __ret;
  58802. }
  58803. #endif
  58804. #ifdef __LITTLE_ENDIAN__
  58805. __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
  58806. poly8x16_t __ret;
  58807. __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
  58808. return __ret;
  58809. }
  58810. #else
  58811. __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
  58812. poly8x16x4_t __rev0;
  58813. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58814. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58815. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58816. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58817. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58818. poly8x16_t __ret;
  58819. __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
  58820. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58821. return __ret;
  58822. }
  58823. #endif
  58824. #ifdef __LITTLE_ENDIAN__
  58825. __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
  58826. uint8x16_t __ret;
  58827. __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
  58828. return __ret;
  58829. }
  58830. #else
  58831. __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
  58832. uint8x16x4_t __rev0;
  58833. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58834. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58835. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58836. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58837. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58838. uint8x16_t __ret;
  58839. __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
  58840. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58841. return __ret;
  58842. }
  58843. #endif
  58844. #ifdef __LITTLE_ENDIAN__
  58845. __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
  58846. int8x16_t __ret;
  58847. __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
  58848. return __ret;
  58849. }
  58850. #else
  58851. __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
  58852. int8x16x4_t __rev0;
  58853. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58854. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58855. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58856. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58857. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58858. int8x16_t __ret;
  58859. __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
  58860. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58861. return __ret;
  58862. }
  58863. #endif
  58864. #ifdef __LITTLE_ENDIAN__
  58865. __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
  58866. uint8x8_t __ret;
  58867. __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
  58868. return __ret;
  58869. }
  58870. #else
  58871. __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
  58872. uint8x16x4_t __rev0;
  58873. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58874. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58875. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58876. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58877. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58878. uint8x8_t __ret;
  58879. __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
  58880. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58881. return __ret;
  58882. }
  58883. #endif
  58884. #ifdef __LITTLE_ENDIAN__
  58885. __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
  58886. int8x8_t __ret;
  58887. __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
  58888. return __ret;
  58889. }
  58890. #else
  58891. __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
  58892. int8x16x4_t __rev0;
  58893. __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58894. __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58895. __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58896. __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58897. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  58898. int8x8_t __ret;
  58899. __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
  58900. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58901. return __ret;
  58902. }
  58903. #endif
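/* vqtbx1: single-register table lookup extension (AArch64 TBX). Like vqtbl1, but
 * an out-of-range index leaves the corresponding byte of the first operand
 * unchanged instead of writing 0.
 *
 * Usage sketch (illustrative only; values and names are hypothetical):
 *
 *   uint8x8_t  fallback = vdup_n_u8(0xff);
 *   uint8x16_t table    = vdupq_n_u8(7);
 *   uint8x8_t  idx      = vdup_n_u8(20);                   // 20 >= 16, out of range
 *   uint8x8_t  r        = vqtbx1_u8(fallback, table, idx); // those lanes keep 0xff
 */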
  58904. #ifdef __LITTLE_ENDIAN__
  58905. __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
  58906. poly8x8_t __ret;
  58907. __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
  58908. return __ret;
  58909. }
  58910. #else
  58911. __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
  58912. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  58913. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58914. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  58915. poly8x8_t __ret;
  58916. __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
  58917. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58918. return __ret;
  58919. }
  58920. #endif
  58921. #ifdef __LITTLE_ENDIAN__
  58922. __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
  58923. poly8x16_t __ret;
  58924. __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
  58925. return __ret;
  58926. }
  58927. #else
  58928. __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
  58929. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58930. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58931. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58932. poly8x16_t __ret;
  58933. __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
  58934. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58935. return __ret;
  58936. }
  58937. #endif
  58938. #ifdef __LITTLE_ENDIAN__
  58939. __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  58940. uint8x16_t __ret;
  58941. __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
  58942. return __ret;
  58943. }
  58944. #else
  58945. __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  58946. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58947. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58948. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58949. uint8x16_t __ret;
  58950. __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
  58951. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58952. return __ret;
  58953. }
  58954. #endif
  58955. #ifdef __LITTLE_ENDIAN__
  58956. __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  58957. int8x16_t __ret;
  58958. __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
  58959. return __ret;
  58960. }
  58961. #else
  58962. __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  58963. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58964. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58965. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58966. int8x16_t __ret;
  58967. __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
  58968. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58969. return __ret;
  58970. }
  58971. #endif
  58972. #ifdef __LITTLE_ENDIAN__
  58973. __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
  58974. uint8x8_t __ret;
  58975. __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
  58976. return __ret;
  58977. }
  58978. #else
  58979. __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
  58980. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  58981. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58982. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  58983. uint8x8_t __ret;
  58984. __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
  58985. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  58986. return __ret;
  58987. }
  58988. #endif
  58989. #ifdef __LITTLE_ENDIAN__
  58990. __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
  58991. int8x8_t __ret;
  58992. __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
  58993. return __ret;
  58994. }
  58995. #else
  58996. __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
  58997. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  58998. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  58999. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59000. int8x8_t __ret;
  59001. __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
  59002. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59003. return __ret;
  59004. }
  59005. #endif
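/* vqtbx2: table lookup extension over a pair of table registers (32 table bytes);
 * out-of-range indices keep the byte from the first operand. */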
  59006. #ifdef __LITTLE_ENDIAN__
  59007. __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
  59008. poly8x8_t __ret;
  59009. __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
  59010. return __ret;
  59011. }
  59012. #else
  59013. __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
  59014. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59015. poly8x16x2_t __rev1;
  59016. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59017. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59018. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59019. poly8x8_t __ret;
  59020. __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
  59021. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59022. return __ret;
  59023. }
  59024. #endif
  59025. #ifdef __LITTLE_ENDIAN__
  59026. __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
  59027. poly8x16_t __ret;
  59028. __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
  59029. return __ret;
  59030. }
  59031. #else
  59032. __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
  59033. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59034. poly8x16x2_t __rev1;
  59035. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59036. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59037. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59038. poly8x16_t __ret;
  59039. __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
  59040. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59041. return __ret;
  59042. }
  59043. #endif
  59044. #ifdef __LITTLE_ENDIAN__
  59045. __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
  59046. uint8x16_t __ret;
  59047. __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
  59048. return __ret;
  59049. }
  59050. #else
  59051. __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
  59052. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59053. uint8x16x2_t __rev1;
  59054. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59055. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59056. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59057. uint8x16_t __ret;
  59058. __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
  59059. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59060. return __ret;
  59061. }
  59062. #endif
  59063. #ifdef __LITTLE_ENDIAN__
  59064. __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
  59065. int8x16_t __ret;
  59066. __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
  59067. return __ret;
  59068. }
  59069. #else
  59070. __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
  59071. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59072. int8x16x2_t __rev1;
  59073. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59074. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59075. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59076. int8x16_t __ret;
  59077. __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
  59078. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59079. return __ret;
  59080. }
  59081. #endif
  59082. #ifdef __LITTLE_ENDIAN__
  59083. __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
  59084. uint8x8_t __ret;
  59085. __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
  59086. return __ret;
  59087. }
  59088. #else
  59089. __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
  59090. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59091. uint8x16x2_t __rev1;
  59092. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59093. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59094. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59095. uint8x8_t __ret;
  59096. __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
  59097. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59098. return __ret;
  59099. }
  59100. #endif
  59101. #ifdef __LITTLE_ENDIAN__
  59102. __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
  59103. int8x8_t __ret;
  59104. __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
  59105. return __ret;
  59106. }
  59107. #else
  59108. __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
  59109. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59110. int8x16x2_t __rev1;
  59111. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59112. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59113. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59114. int8x8_t __ret;
  59115. __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
  59116. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59117. return __ret;
  59118. }
  59119. #endif
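/* vqtbx3: table lookup extension over three table registers (48 table bytes). */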
  59120. #ifdef __LITTLE_ENDIAN__
  59121. __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
  59122. poly8x8_t __ret;
  59123. __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
  59124. return __ret;
  59125. }
  59126. #else
  59127. __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
  59128. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59129. poly8x16x3_t __rev1;
  59130. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59131. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59132. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59133. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59134. poly8x8_t __ret;
  59135. __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
  59136. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59137. return __ret;
  59138. }
  59139. #endif
  59140. #ifdef __LITTLE_ENDIAN__
  59141. __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
  59142. poly8x16_t __ret;
  59143. __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
  59144. return __ret;
  59145. }
  59146. #else
  59147. __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
  59148. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59149. poly8x16x3_t __rev1;
  59150. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59151. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59152. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59153. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59154. poly8x16_t __ret;
  59155. __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
  59156. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59157. return __ret;
  59158. }
  59159. #endif
  59160. #ifdef __LITTLE_ENDIAN__
  59161. __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
  59162. uint8x16_t __ret;
  59163. __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
  59164. return __ret;
  59165. }
  59166. #else
  59167. __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
  59168. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59169. uint8x16x3_t __rev1;
  59170. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59171. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59172. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59173. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59174. uint8x16_t __ret;
  59175. __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
  59176. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59177. return __ret;
  59178. }
  59179. #endif
  59180. #ifdef __LITTLE_ENDIAN__
  59181. __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
  59182. int8x16_t __ret;
  59183. __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
  59184. return __ret;
  59185. }
  59186. #else
  59187. __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
  59188. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59189. int8x16x3_t __rev1;
  59190. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59191. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59192. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59193. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59194. int8x16_t __ret;
  59195. __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
  59196. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59197. return __ret;
  59198. }
  59199. #endif
  59200. #ifdef __LITTLE_ENDIAN__
  59201. __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
  59202. uint8x8_t __ret;
  59203. __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
  59204. return __ret;
  59205. }
  59206. #else
  59207. __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
  59208. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59209. uint8x16x3_t __rev1;
  59210. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59211. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59212. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59213. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59214. uint8x8_t __ret;
  59215. __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
  59216. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59217. return __ret;
  59218. }
  59219. #endif
  59220. #ifdef __LITTLE_ENDIAN__
  59221. __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
  59222. int8x8_t __ret;
  59223. __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
  59224. return __ret;
  59225. }
  59226. #else
  59227. __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
  59228. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59229. int8x16x3_t __rev1;
  59230. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59231. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59232. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59233. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59234. int8x8_t __ret;
  59235. __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
  59236. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59237. return __ret;
  59238. }
  59239. #endif
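/* vqtbx4: table lookup extension over four table registers (64 table bytes). */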
  59240. #ifdef __LITTLE_ENDIAN__
  59241. __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
  59242. poly8x8_t __ret;
  59243. __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
  59244. return __ret;
  59245. }
  59246. #else
  59247. __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
  59248. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59249. poly8x16x4_t __rev1;
  59250. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59251. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59252. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59253. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59254. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59255. poly8x8_t __ret;
  59256. __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
  59257. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59258. return __ret;
  59259. }
  59260. #endif
  59261. #ifdef __LITTLE_ENDIAN__
  59262. __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
  59263. poly8x16_t __ret;
  59264. __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
  59265. return __ret;
  59266. }
  59267. #else
  59268. __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
  59269. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59270. poly8x16x4_t __rev1;
  59271. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59272. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59273. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59274. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59275. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59276. poly8x16_t __ret;
  59277. __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
  59278. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59279. return __ret;
  59280. }
  59281. #endif
  59282. #ifdef __LITTLE_ENDIAN__
  59283. __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
  59284. uint8x16_t __ret;
  59285. __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
  59286. return __ret;
  59287. }
  59288. #else
  59289. __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
  59290. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59291. uint8x16x4_t __rev1;
  59292. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59293. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59294. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59295. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59296. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59297. uint8x16_t __ret;
  59298. __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
  59299. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59300. return __ret;
  59301. }
  59302. #endif
  59303. #ifdef __LITTLE_ENDIAN__
  59304. __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
  59305. int8x16_t __ret;
  59306. __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
  59307. return __ret;
  59308. }
  59309. #else
  59310. __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
  59311. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59312. int8x16x4_t __rev1;
  59313. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59314. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59315. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59316. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59317. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59318. int8x16_t __ret;
  59319. __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
  59320. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59321. return __ret;
  59322. }
  59323. #endif
  59324. #ifdef __LITTLE_ENDIAN__
  59325. __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
  59326. uint8x8_t __ret;
  59327. __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
  59328. return __ret;
  59329. }
  59330. #else
  59331. __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
  59332. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59333. uint8x16x4_t __rev1;
  59334. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59335. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59336. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59337. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59338. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59339. uint8x8_t __ret;
  59340. __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
  59341. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59342. return __ret;
  59343. }
  59344. #endif
  59345. #ifdef __LITTLE_ENDIAN__
  59346. __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
  59347. int8x8_t __ret;
  59348. __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
  59349. return __ret;
  59350. }
  59351. #else
  59352. __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
  59353. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59354. int8x16x4_t __rev1;
  59355. __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59356. __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59357. __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59358. __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59359. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59360. int8x8_t __ret;
  59361. __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
  59362. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59363. return __ret;
  59364. }
  59365. #endif
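/* vraddhn_high_*: rounding add, narrow each wide sum to its upper half, and
   pack into the high half of the result; the low half is taken from __p0.
   As throughout this header, the #else branches serve big-endian targets by
   reversing lane order with __builtin_shufflevector before and after the call. */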
  59366. #ifdef __LITTLE_ENDIAN__
  59367. __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  59368. uint16x8_t __ret;
  59369. __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
  59370. return __ret;
  59371. }
  59372. #else
  59373. __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  59374. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  59375. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  59376. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  59377. uint16x8_t __ret;
  59378. __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
  59379. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59380. return __ret;
  59381. }
  59382. #endif
  59383. #ifdef __LITTLE_ENDIAN__
  59384. __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  59385. uint32x4_t __ret;
  59386. __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
  59387. return __ret;
  59388. }
  59389. #else
  59390. __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  59391. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  59392. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  59393. uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  59394. uint32x4_t __ret;
  59395. __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
  59396. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  59397. return __ret;
  59398. }
  59399. #endif
  59400. #ifdef __LITTLE_ENDIAN__
  59401. __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  59402. uint8x16_t __ret;
  59403. __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
  59404. return __ret;
  59405. }
  59406. #else
  59407. __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  59408. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59409. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  59410. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59411. uint8x16_t __ret;
  59412. __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
  59413. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59414. return __ret;
  59415. }
  59416. #endif
  59417. #ifdef __LITTLE_ENDIAN__
  59418. __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  59419. int16x8_t __ret;
  59420. __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
  59421. return __ret;
  59422. }
  59423. #else
  59424. __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  59425. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  59426. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  59427. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  59428. int16x8_t __ret;
  59429. __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
  59430. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59431. return __ret;
  59432. }
  59433. #endif
  59434. #ifdef __LITTLE_ENDIAN__
  59435. __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  59436. int32x4_t __ret;
  59437. __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
  59438. return __ret;
  59439. }
  59440. #else
  59441. __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  59442. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  59443. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  59444. int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  59445. int32x4_t __ret;
  59446. __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
  59447. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  59448. return __ret;
  59449. }
  59450. #endif
  59451. #ifdef __LITTLE_ENDIAN__
  59452. __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  59453. int8x16_t __ret;
  59454. __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
  59455. return __ret;
  59456. }
  59457. #else
  59458. __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  59459. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59460. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  59461. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  59462. int8x16_t __ret;
  59463. __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
  59464. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59465. return __ret;
  59466. }
  59467. #endif
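/* vrbit*: reverse the order of the bits within each 8-bit element. */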
  59468. #ifdef __LITTLE_ENDIAN__
  59469. __ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
  59470. poly8x8_t __ret;
  59471. __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
  59472. return __ret;
  59473. }
  59474. #else
  59475. __ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
  59476. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59477. poly8x8_t __ret;
  59478. __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
  59479. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59480. return __ret;
  59481. }
  59482. #endif
  59483. #ifdef __LITTLE_ENDIAN__
  59484. __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
  59485. poly8x16_t __ret;
  59486. __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
  59487. return __ret;
  59488. }
  59489. #else
  59490. __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
  59491. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59492. poly8x16_t __ret;
  59493. __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
  59494. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59495. return __ret;
  59496. }
  59497. #endif
  59498. #ifdef __LITTLE_ENDIAN__
  59499. __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
  59500. uint8x16_t __ret;
  59501. __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
  59502. return __ret;
  59503. }
  59504. #else
  59505. __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
  59506. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59507. uint8x16_t __ret;
  59508. __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
  59509. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59510. return __ret;
  59511. }
  59512. #endif
  59513. #ifdef __LITTLE_ENDIAN__
  59514. __ai int8x16_t vrbitq_s8(int8x16_t __p0) {
  59515. int8x16_t __ret;
  59516. __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
  59517. return __ret;
  59518. }
  59519. #else
  59520. __ai int8x16_t vrbitq_s8(int8x16_t __p0) {
  59521. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59522. int8x16_t __ret;
  59523. __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
  59524. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  59525. return __ret;
  59526. }
  59527. #endif
  59528. #ifdef __LITTLE_ENDIAN__
  59529. __ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
  59530. uint8x8_t __ret;
  59531. __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
  59532. return __ret;
  59533. }
  59534. #else
  59535. __ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
  59536. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59537. uint8x8_t __ret;
  59538. __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
  59539. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59540. return __ret;
  59541. }
  59542. #endif
  59543. #ifdef __LITTLE_ENDIAN__
  59544. __ai int8x8_t vrbit_s8(int8x8_t __p0) {
  59545. int8x8_t __ret;
  59546. __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
  59547. return __ret;
  59548. }
  59549. #else
  59550. __ai int8x8_t vrbit_s8(int8x8_t __p0) {
  59551. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  59552. int8x8_t __ret;
  59553. __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
  59554. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  59555. return __ret;
  59556. }
  59557. #endif
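/* vrecpe*: floating-point reciprocal estimate (FRECPE) for float64 vectors and
   for scalar float64/float32 operands. */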
  59558. #ifdef __LITTLE_ENDIAN__
  59559. __ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
  59560. float64x2_t __ret;
  59561. __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
  59562. return __ret;
  59563. }
  59564. #else
  59565. __ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
  59566. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  59567. float64x2_t __ret;
  59568. __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
  59569. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  59570. return __ret;
  59571. }
  59572. #endif
  59573. #ifdef __LITTLE_ENDIAN__
  59574. __ai float64x1_t vrecpe_f64(float64x1_t __p0) {
  59575. float64x1_t __ret;
  59576. __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
  59577. return __ret;
  59578. }
  59579. #else
  59580. __ai float64x1_t vrecpe_f64(float64x1_t __p0) {
  59581. float64x1_t __ret;
  59582. __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
  59583. return __ret;
  59584. }
  59585. #endif
  59586. #ifdef __LITTLE_ENDIAN__
  59587. __ai float64_t vrecped_f64(float64_t __p0) {
  59588. float64_t __ret;
  59589. __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
  59590. return __ret;
  59591. }
  59592. #else
  59593. __ai float64_t vrecped_f64(float64_t __p0) {
  59594. float64_t __ret;
  59595. __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
  59596. return __ret;
  59597. }
  59598. #endif
  59599. #ifdef __LITTLE_ENDIAN__
  59600. __ai float32_t vrecpes_f32(float32_t __p0) {
  59601. float32_t __ret;
  59602. __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
  59603. return __ret;
  59604. }
  59605. #else
  59606. __ai float32_t vrecpes_f32(float32_t __p0) {
  59607. float32_t __ret;
  59608. __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
  59609. return __ret;
  59610. }
  59611. #endif
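/* vrecps*: floating-point reciprocal step (FRECPS); computes 2.0 - __p0 * __p1,
   one Newton-Raphson refinement step toward a reciprocal. */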
  59612. #ifdef __LITTLE_ENDIAN__
  59613. __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
  59614. float64x2_t __ret;
  59615. __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  59616. return __ret;
  59617. }
  59618. #else
  59619. __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
  59620. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  59621. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  59622. float64x2_t __ret;
  59623. __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  59624. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  59625. return __ret;
  59626. }
  59627. #endif
  59628. #ifdef __LITTLE_ENDIAN__
  59629. __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
  59630. float64x1_t __ret;
  59631. __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  59632. return __ret;
  59633. }
  59634. #else
  59635. __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
  59636. float64x1_t __ret;
  59637. __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  59638. return __ret;
  59639. }
  59640. #endif
  59641. #ifdef __LITTLE_ENDIAN__
  59642. __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
  59643. float64_t __ret;
  59644. __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
  59645. return __ret;
  59646. }
  59647. #else
  59648. __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
  59649. float64_t __ret;
  59650. __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
  59651. return __ret;
  59652. }
  59653. #endif
  59654. #ifdef __LITTLE_ENDIAN__
  59655. __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
  59656. float32_t __ret;
  59657. __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
  59658. return __ret;
  59659. }
  59660. #else
  59661. __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
  59662. float32_t __ret;
  59663. __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
  59664. return __ret;
  59665. }
  59666. #endif
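/* vrecpx*: scalar floating-point reciprocal exponent (FRECPX); roughly, the
   exponent is inverted and the significand cleared (see the Arm ARM for the
   exact special-case handling). */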
  59667. #ifdef __LITTLE_ENDIAN__
  59668. __ai float64_t vrecpxd_f64(float64_t __p0) {
  59669. float64_t __ret;
  59670. __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
  59671. return __ret;
  59672. }
  59673. #else
  59674. __ai float64_t vrecpxd_f64(float64_t __p0) {
  59675. float64_t __ret;
  59676. __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
  59677. return __ret;
  59678. }
  59679. #endif
  59680. #ifdef __LITTLE_ENDIAN__
  59681. __ai float32_t vrecpxs_f32(float32_t __p0) {
  59682. float32_t __ret;
  59683. __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
  59684. return __ret;
  59685. }
  59686. #else
  59687. __ai float32_t vrecpxs_f32(float32_t __p0) {
  59688. float32_t __ret;
  59689. __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
  59690. return __ret;
  59691. }
  59692. #endif
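/* vrshld_*: scalar 64-bit rounding shift left by a register value; a negative
   shift amount shifts right with rounding. */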
  59693. #ifdef __LITTLE_ENDIAN__
  59694. __ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
  59695. uint64_t __ret;
  59696. __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
  59697. return __ret;
  59698. }
  59699. #else
  59700. __ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
  59701. uint64_t __ret;
  59702. __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
  59703. return __ret;
  59704. }
  59705. #endif
  59706. #ifdef __LITTLE_ENDIAN__
  59707. __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
  59708. int64_t __ret;
  59709. __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
  59710. return __ret;
  59711. }
  59712. #else
  59713. __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
  59714. int64_t __ret;
  59715. __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
  59716. return __ret;
  59717. }
  59718. #endif
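/* vrshrd_n_*: scalar 64-bit rounding shift right by an immediate. */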
  59719. #ifdef __LITTLE_ENDIAN__
  59720. #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
  59721. uint64_t __s0 = __p0; \
  59722. uint64_t __ret; \
  59723. __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
  59724. __ret; \
  59725. })
  59726. #else
  59727. #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
  59728. uint64_t __s0 = __p0; \
  59729. uint64_t __ret; \
  59730. __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
  59731. __ret; \
  59732. })
  59733. #endif
  59734. #ifdef __LITTLE_ENDIAN__
  59735. #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
  59736. int64_t __s0 = __p0; \
  59737. int64_t __ret; \
  59738. __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
  59739. __ret; \
  59740. })
  59741. #else
  59742. #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
  59743. int64_t __s0 = __p0; \
  59744. int64_t __ret; \
  59745. __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
  59746. __ret; \
  59747. })
  59748. #endif
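/* vrshrn_high_n_*: rounding shift right by an immediate, narrow to half width,
   and pack into the upper half of the result; the lower half comes from the
   first argument. For example, vrshrn_high_n_u32(lo, v, 8) narrows v by 8 with
   rounding and returns it combined above lo. */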
  59749. #ifdef __LITTLE_ENDIAN__
  59750. #define vrshrn_high_n_u32(__p0_218, __p1_218, __p2_218) __extension__ ({ \
  59751. uint16x4_t __s0_218 = __p0_218; \
  59752. uint32x4_t __s1_218 = __p1_218; \
  59753. uint16x8_t __ret_218; \
  59754. __ret_218 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_218), (uint16x4_t)(vrshrn_n_u32(__s1_218, __p2_218)))); \
  59755. __ret_218; \
  59756. })
  59757. #else
  59758. #define vrshrn_high_n_u32(__p0_219, __p1_219, __p2_219) __extension__ ({ \
  59759. uint16x4_t __s0_219 = __p0_219; \
  59760. uint32x4_t __s1_219 = __p1_219; \
  59761. uint16x4_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 3, 2, 1, 0); \
  59762. uint32x4_t __rev1_219; __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 3, 2, 1, 0); \
  59763. uint16x8_t __ret_219; \
  59764. __ret_219 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_219), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_219, __p2_219)))); \
  59765. __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 7, 6, 5, 4, 3, 2, 1, 0); \
  59766. __ret_219; \
  59767. })
  59768. #endif
  59769. #ifdef __LITTLE_ENDIAN__
  59770. #define vrshrn_high_n_u64(__p0_220, __p1_220, __p2_220) __extension__ ({ \
  59771. uint32x2_t __s0_220 = __p0_220; \
  59772. uint64x2_t __s1_220 = __p1_220; \
  59773. uint32x4_t __ret_220; \
  59774. __ret_220 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_220), (uint32x2_t)(vrshrn_n_u64(__s1_220, __p2_220)))); \
  59775. __ret_220; \
  59776. })
  59777. #else
  59778. #define vrshrn_high_n_u64(__p0_221, __p1_221, __p2_221) __extension__ ({ \
  59779. uint32x2_t __s0_221 = __p0_221; \
  59780. uint64x2_t __s1_221 = __p1_221; \
  59781. uint32x2_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 1, 0); \
  59782. uint64x2_t __rev1_221; __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 1, 0); \
  59783. uint32x4_t __ret_221; \
  59784. __ret_221 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_221), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_221, __p2_221)))); \
  59785. __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \
  59786. __ret_221; \
  59787. })
  59788. #endif
  59789. #ifdef __LITTLE_ENDIAN__
  59790. #define vrshrn_high_n_u16(__p0_222, __p1_222, __p2_222) __extension__ ({ \
  59791. uint8x8_t __s0_222 = __p0_222; \
  59792. uint16x8_t __s1_222 = __p1_222; \
  59793. uint8x16_t __ret_222; \
  59794. __ret_222 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_222), (uint8x8_t)(vrshrn_n_u16(__s1_222, __p2_222)))); \
  59795. __ret_222; \
  59796. })
  59797. #else
  59798. #define vrshrn_high_n_u16(__p0_223, __p1_223, __p2_223) __extension__ ({ \
  59799. uint8x8_t __s0_223 = __p0_223; \
  59800. uint16x8_t __s1_223 = __p1_223; \
  59801. uint8x8_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 7, 6, 5, 4, 3, 2, 1, 0); \
  59802. uint16x8_t __rev1_223; __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 7, 6, 5, 4, 3, 2, 1, 0); \
  59803. uint8x16_t __ret_223; \
  59804. __ret_223 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_223), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_223, __p2_223)))); \
  59805. __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  59806. __ret_223; \
  59807. })
  59808. #endif
  59809. #ifdef __LITTLE_ENDIAN__
  59810. #define vrshrn_high_n_s32(__p0_224, __p1_224, __p2_224) __extension__ ({ \
  59811. int16x4_t __s0_224 = __p0_224; \
  59812. int32x4_t __s1_224 = __p1_224; \
  59813. int16x8_t __ret_224; \
  59814. __ret_224 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_224), (int16x4_t)(vrshrn_n_s32(__s1_224, __p2_224)))); \
  59815. __ret_224; \
  59816. })
  59817. #else
  59818. #define vrshrn_high_n_s32(__p0_225, __p1_225, __p2_225) __extension__ ({ \
  59819. int16x4_t __s0_225 = __p0_225; \
  59820. int32x4_t __s1_225 = __p1_225; \
  59821. int16x4_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 3, 2, 1, 0); \
  59822. int32x4_t __rev1_225; __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, 3, 2, 1, 0); \
  59823. int16x8_t __ret_225; \
  59824. __ret_225 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_225), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_225, __p2_225)))); \
  59825. __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
  59826. __ret_225; \
  59827. })
  59828. #endif
  59829. #ifdef __LITTLE_ENDIAN__
  59830. #define vrshrn_high_n_s64(__p0_226, __p1_226, __p2_226) __extension__ ({ \
  59831. int32x2_t __s0_226 = __p0_226; \
  59832. int64x2_t __s1_226 = __p1_226; \
  59833. int32x4_t __ret_226; \
  59834. __ret_226 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_226), (int32x2_t)(vrshrn_n_s64(__s1_226, __p2_226)))); \
  59835. __ret_226; \
  59836. })
  59837. #else
  59838. #define vrshrn_high_n_s64(__p0_227, __p1_227, __p2_227) __extension__ ({ \
  59839. int32x2_t __s0_227 = __p0_227; \
  59840. int64x2_t __s1_227 = __p1_227; \
  59841. int32x2_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 1, 0); \
  59842. int64x2_t __rev1_227; __rev1_227 = __builtin_shufflevector(__s1_227, __s1_227, 1, 0); \
  59843. int32x4_t __ret_227; \
  59844. __ret_227 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_227), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_227, __p2_227)))); \
  59845. __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 3, 2, 1, 0); \
  59846. __ret_227; \
  59847. })
  59848. #endif
  59849. #ifdef __LITTLE_ENDIAN__
  59850. #define vrshrn_high_n_s16(__p0_228, __p1_228, __p2_228) __extension__ ({ \
  59851. int8x8_t __s0_228 = __p0_228; \
  59852. int16x8_t __s1_228 = __p1_228; \
  59853. int8x16_t __ret_228; \
  59854. __ret_228 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_228), (int8x8_t)(vrshrn_n_s16(__s1_228, __p2_228)))); \
  59855. __ret_228; \
  59856. })
  59857. #else
  59858. #define vrshrn_high_n_s16(__p0_229, __p1_229, __p2_229) __extension__ ({ \
  59859. int8x8_t __s0_229 = __p0_229; \
  59860. int16x8_t __s1_229 = __p1_229; \
  59861. int8x8_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 7, 6, 5, 4, 3, 2, 1, 0); \
  59862. int16x8_t __rev1_229; __rev1_229 = __builtin_shufflevector(__s1_229, __s1_229, 7, 6, 5, 4, 3, 2, 1, 0); \
  59863. int8x16_t __ret_229; \
  59864. __ret_229 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_229), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_229, __p2_229)))); \
  59865. __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  59866. __ret_229; \
  59867. })
  59868. #endif
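/* vrsqrte*: floating-point reciprocal square-root estimate (FRSQRTE) for
   float64 vectors and scalar float64/float32 operands. */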
  59869. #ifdef __LITTLE_ENDIAN__
  59870. __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
  59871. float64x2_t __ret;
  59872. __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
  59873. return __ret;
  59874. }
  59875. #else
  59876. __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
  59877. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  59878. float64x2_t __ret;
  59879. __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
  59880. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  59881. return __ret;
  59882. }
  59883. #endif
  59884. #ifdef __LITTLE_ENDIAN__
  59885. __ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
  59886. float64x1_t __ret;
  59887. __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
  59888. return __ret;
  59889. }
  59890. #else
  59891. __ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
  59892. float64x1_t __ret;
  59893. __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
  59894. return __ret;
  59895. }
  59896. #endif
  59897. #ifdef __LITTLE_ENDIAN__
  59898. __ai float64_t vrsqrted_f64(float64_t __p0) {
  59899. float64_t __ret;
  59900. __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
  59901. return __ret;
  59902. }
  59903. #else
  59904. __ai float64_t vrsqrted_f64(float64_t __p0) {
  59905. float64_t __ret;
  59906. __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
  59907. return __ret;
  59908. }
  59909. #endif
  59910. #ifdef __LITTLE_ENDIAN__
  59911. __ai float32_t vrsqrtes_f32(float32_t __p0) {
  59912. float32_t __ret;
  59913. __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
  59914. return __ret;
  59915. }
  59916. #else
  59917. __ai float32_t vrsqrtes_f32(float32_t __p0) {
  59918. float32_t __ret;
  59919. __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
  59920. return __ret;
  59921. }
  59922. #endif
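/* vrsqrts*: floating-point reciprocal square-root step (FRSQRTS); computes
   (3.0 - __p0 * __p1) / 2.0, one refinement step for an rsqrt estimate. */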
  59923. #ifdef __LITTLE_ENDIAN__
  59924. __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
  59925. float64x2_t __ret;
  59926. __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
  59927. return __ret;
  59928. }
  59929. #else
  59930. __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
  59931. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  59932. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  59933. float64x2_t __ret;
  59934. __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
  59935. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  59936. return __ret;
  59937. }
  59938. #endif
  59939. #ifdef __LITTLE_ENDIAN__
  59940. __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
  59941. float64x1_t __ret;
  59942. __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  59943. return __ret;
  59944. }
  59945. #else
  59946. __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
  59947. float64x1_t __ret;
  59948. __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
  59949. return __ret;
  59950. }
  59951. #endif
  59952. #ifdef __LITTLE_ENDIAN__
  59953. __ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
  59954. float64_t __ret;
  59955. __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
  59956. return __ret;
  59957. }
  59958. #else
  59959. __ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
  59960. float64_t __ret;
  59961. __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
  59962. return __ret;
  59963. }
  59964. #endif
  59965. #ifdef __LITTLE_ENDIAN__
  59966. __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
  59967. float32_t __ret;
  59968. __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
  59969. return __ret;
  59970. }
  59971. #else
  59972. __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
  59973. float32_t __ret;
  59974. __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
  59975. return __ret;
  59976. }
  59977. #endif
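/* vrsrad_n_*: scalar 64-bit rounding shift right by an immediate, accumulated
   into the first operand. */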
  59978. #ifdef __LITTLE_ENDIAN__
  59979. #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
  59980. uint64_t __s0 = __p0; \
  59981. uint64_t __s1 = __p1; \
  59982. uint64_t __ret; \
  59983. __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
  59984. __ret; \
  59985. })
  59986. #else
  59987. #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
  59988. uint64_t __s0 = __p0; \
  59989. uint64_t __s1 = __p1; \
  59990. uint64_t __ret; \
  59991. __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
  59992. __ret; \
  59993. })
  59994. #endif
  59995. #ifdef __LITTLE_ENDIAN__
  59996. #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
  59997. int64_t __s0 = __p0; \
  59998. int64_t __s1 = __p1; \
  59999. int64_t __ret; \
  60000. __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
  60001. __ret; \
  60002. })
  60003. #else
  60004. #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
  60005. int64_t __s0 = __p0; \
  60006. int64_t __s1 = __p1; \
  60007. int64_t __ret; \
  60008. __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
  60009. __ret; \
  60010. })
  60011. #endif
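/* vrsubhn_high_*: rounding subtract, narrow each wide difference to its upper
   half, and pack into the high half of the result; the low half is taken from
   __p0. */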
  60012. #ifdef __LITTLE_ENDIAN__
  60013. __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  60014. uint16x8_t __ret;
  60015. __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
  60016. return __ret;
  60017. }
  60018. #else
  60019. __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  60020. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  60021. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  60022. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  60023. uint16x8_t __ret;
  60024. __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
  60025. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  60026. return __ret;
  60027. }
  60028. #endif
  60029. #ifdef __LITTLE_ENDIAN__
  60030. __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  60031. uint32x4_t __ret;
  60032. __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
  60033. return __ret;
  60034. }
  60035. #else
  60036. __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  60037. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  60038. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  60039. uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  60040. uint32x4_t __ret;
  60041. __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
  60042. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  60043. return __ret;
  60044. }
  60045. #endif
  60046. #ifdef __LITTLE_ENDIAN__
  60047. __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  60048. uint8x16_t __ret;
  60049. __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
  60050. return __ret;
  60051. }
  60052. #else
  60053. __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  60054. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  60055. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  60056. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  60057. uint8x16_t __ret;
  60058. __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
  60059. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  60060. return __ret;
  60061. }
  60062. #endif
  60063. #ifdef __LITTLE_ENDIAN__
  60064. __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  60065. int16x8_t __ret;
  60066. __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
  60067. return __ret;
  60068. }
  60069. #else
  60070. __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  60071. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  60072. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  60073. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  60074. int16x8_t __ret;
  60075. __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
  60076. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  60077. return __ret;
  60078. }
  60079. #endif
  60080. #ifdef __LITTLE_ENDIAN__
  60081. __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  60082. int32x4_t __ret;
  60083. __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
  60084. return __ret;
  60085. }
  60086. #else
  60087. __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  60088. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  60089. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  60090. int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  60091. int32x4_t __ret;
  60092. __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
  60093. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  60094. return __ret;
  60095. }
  60096. #endif
  60097. #ifdef __LITTLE_ENDIAN__
  60098. __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  60099. int8x16_t __ret;
  60100. __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
  60101. return __ret;
  60102. }
  60103. #else
  60104. __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  60105. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  60106. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  60107. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  60108. int8x16_t __ret;
  60109. __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
  60110. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  60111. return __ret;
  60112. }
  60113. #endif
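/* vset_lane / vsetq_lane for poly64 and float64: insert the scalar __p0 into
   lane __p2 of vector __p1. The __noswap_* forms are for internal use by other
   big-endian wrappers that have already reversed their operands. */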
  60114. #ifdef __LITTLE_ENDIAN__
  60115. #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60116. poly64_t __s0 = __p0; \
  60117. poly64x1_t __s1 = __p1; \
  60118. poly64x1_t __ret; \
  60119. __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  60120. __ret; \
  60121. })
  60122. #else
  60123. #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60124. poly64_t __s0 = __p0; \
  60125. poly64x1_t __s1 = __p1; \
  60126. poly64x1_t __ret; \
  60127. __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  60128. __ret; \
  60129. })
  60130. #define __noswap_vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60131. poly64_t __s0 = __p0; \
  60132. poly64x1_t __s1 = __p1; \
  60133. poly64x1_t __ret; \
  60134. __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
  60135. __ret; \
  60136. })
  60137. #endif
  60138. #ifdef __LITTLE_ENDIAN__
  60139. #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60140. poly64_t __s0 = __p0; \
  60141. poly64x2_t __s1 = __p1; \
  60142. poly64x2_t __ret; \
  60143. __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
  60144. __ret; \
  60145. })
  60146. #else
  60147. #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60148. poly64_t __s0 = __p0; \
  60149. poly64x2_t __s1 = __p1; \
  60150. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  60151. poly64x2_t __ret; \
  60152. __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
  60153. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  60154. __ret; \
  60155. })
  60156. #define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60157. poly64_t __s0 = __p0; \
  60158. poly64x2_t __s1 = __p1; \
  60159. poly64x2_t __ret; \
  60160. __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
  60161. __ret; \
  60162. })
  60163. #endif
  60164. #ifdef __LITTLE_ENDIAN__
  60165. #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  60166. float64_t __s0 = __p0; \
  60167. float64x2_t __s1 = __p1; \
  60168. float64x2_t __ret; \
  60169. __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
  60170. __ret; \
  60171. })
  60172. #else
  60173. #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  60174. float64_t __s0 = __p0; \
  60175. float64x2_t __s1 = __p1; \
  60176. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  60177. float64x2_t __ret; \
  60178. __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \
  60179. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  60180. __ret; \
  60181. })
  60182. #define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  60183. float64_t __s0 = __p0; \
  60184. float64x2_t __s1 = __p1; \
  60185. float64x2_t __ret; \
  60186. __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
  60187. __ret; \
  60188. })
  60189. #endif
  60190. #ifdef __LITTLE_ENDIAN__
  60191. #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  60192. float64_t __s0 = __p0; \
  60193. float64x1_t __s1 = __p1; \
  60194. float64x1_t __ret; \
  60195. __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
  60196. __ret; \
  60197. })
  60198. #else
  60199. #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  60200. float64_t __s0 = __p0; \
  60201. float64x1_t __s1 = __p1; \
  60202. float64x1_t __ret; \
  60203. __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
  60204. __ret; \
  60205. })
  60206. #define __noswap_vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  60207. float64_t __s0 = __p0; \
  60208. float64x1_t __s1 = __p1; \
  60209. float64x1_t __ret; \
  60210. __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
  60211. __ret; \
  60212. })
  60213. #endif
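/* vshld_*: scalar 64-bit shift left by a register value (a negative amount
   shifts right); vshld_n_*: scalar 64-bit shift left by an immediate. */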
  60214. #ifdef __LITTLE_ENDIAN__
  60215. __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
  60216. uint64_t __ret;
  60217. __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
  60218. return __ret;
  60219. }
  60220. #else
  60221. __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
  60222. uint64_t __ret;
  60223. __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
  60224. return __ret;
  60225. }
  60226. #endif
  60227. #ifdef __LITTLE_ENDIAN__
  60228. __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
  60229. int64_t __ret;
  60230. __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
  60231. return __ret;
  60232. }
  60233. #else
  60234. __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
  60235. int64_t __ret;
  60236. __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
  60237. return __ret;
  60238. }
  60239. #endif
  60240. #ifdef __LITTLE_ENDIAN__
  60241. #define vshld_n_u64(__p0, __p1) __extension__ ({ \
  60242. uint64_t __s0 = __p0; \
  60243. uint64_t __ret; \
  60244. __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
  60245. __ret; \
  60246. })
  60247. #else
  60248. #define vshld_n_u64(__p0, __p1) __extension__ ({ \
  60249. uint64_t __s0 = __p0; \
  60250. uint64_t __ret; \
  60251. __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
  60252. __ret; \
  60253. })
  60254. #endif
  60255. #ifdef __LITTLE_ENDIAN__
  60256. #define vshld_n_s64(__p0, __p1) __extension__ ({ \
  60257. int64_t __s0 = __p0; \
  60258. int64_t __ret; \
  60259. __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
  60260. __ret; \
  60261. })
  60262. #else
  60263. #define vshld_n_s64(__p0, __p1) __extension__ ({ \
  60264. int64_t __s0 = __p0; \
  60265. int64_t __ret; \
  60266. __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
  60267. __ret; \
  60268. })
  60269. #endif
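/* vshll_high_n_*: widening shift left long applied to the high half of the
   source vector, implemented as vget_high_* followed by vshll_n_*. */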
  60270. #ifdef __LITTLE_ENDIAN__
  60271. #define vshll_high_n_u8(__p0_230, __p1_230) __extension__ ({ \
  60272. uint8x16_t __s0_230 = __p0_230; \
  60273. uint16x8_t __ret_230; \
  60274. __ret_230 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_230), __p1_230)); \
  60275. __ret_230; \
  60276. })
  60277. #else
  60278. #define vshll_high_n_u8(__p0_231, __p1_231) __extension__ ({ \
  60279. uint8x16_t __s0_231 = __p0_231; \
  60280. uint8x16_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  60281. uint16x8_t __ret_231; \
  60282. __ret_231 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_231), __p1_231)); \
  60283. __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \
  60284. __ret_231; \
  60285. })
  60286. #endif
  60287. #ifdef __LITTLE_ENDIAN__
  60288. #define vshll_high_n_u32(__p0_232, __p1_232) __extension__ ({ \
  60289. uint32x4_t __s0_232 = __p0_232; \
  60290. uint64x2_t __ret_232; \
  60291. __ret_232 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_232), __p1_232)); \
  60292. __ret_232; \
  60293. })
  60294. #else
  60295. #define vshll_high_n_u32(__p0_233, __p1_233) __extension__ ({ \
  60296. uint32x4_t __s0_233 = __p0_233; \
  60297. uint32x4_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \
  60298. uint64x2_t __ret_233; \
  60299. __ret_233 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_233), __p1_233)); \
  60300. __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 1, 0); \
  60301. __ret_233; \
  60302. })
  60303. #endif
  60304. #ifdef __LITTLE_ENDIAN__
  60305. #define vshll_high_n_u16(__p0_234, __p1_234) __extension__ ({ \
  60306. uint16x8_t __s0_234 = __p0_234; \
  60307. uint32x4_t __ret_234; \
  60308. __ret_234 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_234), __p1_234)); \
  60309. __ret_234; \
  60310. })
  60311. #else
  60312. #define vshll_high_n_u16(__p0_235, __p1_235) __extension__ ({ \
  60313. uint16x8_t __s0_235 = __p0_235; \
  60314. uint16x8_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \
  60315. uint32x4_t __ret_235; \
  60316. __ret_235 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_235), __p1_235)); \
  60317. __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 3, 2, 1, 0); \
  60318. __ret_235; \
  60319. })
  60320. #endif
  60321. #ifdef __LITTLE_ENDIAN__
  60322. #define vshll_high_n_s8(__p0_236, __p1_236) __extension__ ({ \
  60323. int8x16_t __s0_236 = __p0_236; \
  60324. int16x8_t __ret_236; \
  60325. __ret_236 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_236), __p1_236)); \
  60326. __ret_236; \
  60327. })
  60328. #else
  60329. #define vshll_high_n_s8(__p0_237, __p1_237) __extension__ ({ \
  60330. int8x16_t __s0_237 = __p0_237; \
  60331. int8x16_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  60332. int16x8_t __ret_237; \
  60333. __ret_237 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_237), __p1_237)); \
  60334. __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 7, 6, 5, 4, 3, 2, 1, 0); \
  60335. __ret_237; \
  60336. })
  60337. #endif
  60338. #ifdef __LITTLE_ENDIAN__
  60339. #define vshll_high_n_s32(__p0_238, __p1_238) __extension__ ({ \
  60340. int32x4_t __s0_238 = __p0_238; \
  60341. int64x2_t __ret_238; \
  60342. __ret_238 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_238), __p1_238)); \
  60343. __ret_238; \
  60344. })
  60345. #else
  60346. #define vshll_high_n_s32(__p0_239, __p1_239) __extension__ ({ \
  60347. int32x4_t __s0_239 = __p0_239; \
  60348. int32x4_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 3, 2, 1, 0); \
  60349. int64x2_t __ret_239; \
  60350. __ret_239 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_239), __p1_239)); \
  60351. __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
  60352. __ret_239; \
  60353. })
  60354. #endif
  60355. #ifdef __LITTLE_ENDIAN__
  60356. #define vshll_high_n_s16(__p0_240, __p1_240) __extension__ ({ \
  60357. int16x8_t __s0_240 = __p0_240; \
  60358. int32x4_t __ret_240; \
  60359. __ret_240 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_240), __p1_240)); \
  60360. __ret_240; \
  60361. })
  60362. #else
  60363. #define vshll_high_n_s16(__p0_241, __p1_241) __extension__ ({ \
  60364. int16x8_t __s0_241 = __p0_241; \
  60365. int16x8_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 7, 6, 5, 4, 3, 2, 1, 0); \
  60366. int32x4_t __ret_241; \
  60367. __ret_241 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_241), __p1_241)); \
  60368. __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 3, 2, 1, 0); \
  60369. __ret_241; \
  60370. })
  60371. #endif
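/* vshrd_n_*: scalar 64-bit shift right by an immediate. */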
  60372. #ifdef __LITTLE_ENDIAN__
  60373. #define vshrd_n_u64(__p0, __p1) __extension__ ({ \
  60374. uint64_t __s0 = __p0; \
  60375. uint64_t __ret; \
  60376. __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
  60377. __ret; \
  60378. })
  60379. #else
  60380. #define vshrd_n_u64(__p0, __p1) __extension__ ({ \
  60381. uint64_t __s0 = __p0; \
  60382. uint64_t __ret; \
  60383. __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
  60384. __ret; \
  60385. })
  60386. #endif
  60387. #ifdef __LITTLE_ENDIAN__
  60388. #define vshrd_n_s64(__p0, __p1) __extension__ ({ \
  60389. int64_t __s0 = __p0; \
  60390. int64_t __ret; \
  60391. __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
  60392. __ret; \
  60393. })
  60394. #else
  60395. #define vshrd_n_s64(__p0, __p1) __extension__ ({ \
  60396. int64_t __s0 = __p0; \
  60397. int64_t __ret; \
  60398. __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
  60399. __ret; \
  60400. })
  60401. #endif
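/* vshrn_high_n_*: shift right by an immediate, narrow to half width, and pack
   into the upper half of the result; the lower half comes from the first
   argument. */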
  60402. #ifdef __LITTLE_ENDIAN__
  60403. #define vshrn_high_n_u32(__p0_242, __p1_242, __p2_242) __extension__ ({ \
  60404. uint16x4_t __s0_242 = __p0_242; \
  60405. uint32x4_t __s1_242 = __p1_242; \
  60406. uint16x8_t __ret_242; \
  60407. __ret_242 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_242), (uint16x4_t)(vshrn_n_u32(__s1_242, __p2_242)))); \
  60408. __ret_242; \
  60409. })
  60410. #else
  60411. #define vshrn_high_n_u32(__p0_243, __p1_243, __p2_243) __extension__ ({ \
  60412. uint16x4_t __s0_243 = __p0_243; \
  60413. uint32x4_t __s1_243 = __p1_243; \
  60414. uint16x4_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
  60415. uint32x4_t __rev1_243; __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 3, 2, 1, 0); \
  60416. uint16x8_t __ret_243; \
  60417. __ret_243 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_243), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_243, __p2_243)))); \
  60418. __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 7, 6, 5, 4, 3, 2, 1, 0); \
  60419. __ret_243; \
  60420. })
  60421. #endif
  60422. #ifdef __LITTLE_ENDIAN__
  60423. #define vshrn_high_n_u64(__p0_244, __p1_244, __p2_244) __extension__ ({ \
  60424. uint32x2_t __s0_244 = __p0_244; \
  60425. uint64x2_t __s1_244 = __p1_244; \
  60426. uint32x4_t __ret_244; \
  60427. __ret_244 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_244), (uint32x2_t)(vshrn_n_u64(__s1_244, __p2_244)))); \
  60428. __ret_244; \
  60429. })
  60430. #else
  60431. #define vshrn_high_n_u64(__p0_245, __p1_245, __p2_245) __extension__ ({ \
  60432. uint32x2_t __s0_245 = __p0_245; \
  60433. uint64x2_t __s1_245 = __p1_245; \
  60434. uint32x2_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \
  60435. uint64x2_t __rev1_245; __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 1, 0); \
  60436. uint32x4_t __ret_245; \
  60437. __ret_245 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_245), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_245, __p2_245)))); \
  60438. __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 3, 2, 1, 0); \
  60439. __ret_245; \
  60440. })
  60441. #endif
  60442. #ifdef __LITTLE_ENDIAN__
  60443. #define vshrn_high_n_u16(__p0_246, __p1_246, __p2_246) __extension__ ({ \
  60444. uint8x8_t __s0_246 = __p0_246; \
  60445. uint16x8_t __s1_246 = __p1_246; \
  60446. uint8x16_t __ret_246; \
  60447. __ret_246 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_246), (uint8x8_t)(vshrn_n_u16(__s1_246, __p2_246)))); \
  60448. __ret_246; \
  60449. })
  60450. #else
  60451. #define vshrn_high_n_u16(__p0_247, __p1_247, __p2_247) __extension__ ({ \
  60452. uint8x8_t __s0_247 = __p0_247; \
  60453. uint16x8_t __s1_247 = __p1_247; \
  60454. uint8x8_t __rev0_247; __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 7, 6, 5, 4, 3, 2, 1, 0); \
  60455. uint16x8_t __rev1_247; __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 7, 6, 5, 4, 3, 2, 1, 0); \
  60456. uint8x16_t __ret_247; \
  60457. __ret_247 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_247), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_247, __p2_247)))); \
  60458. __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  60459. __ret_247; \
  60460. })
  60461. #endif
  60462. #ifdef __LITTLE_ENDIAN__
  60463. #define vshrn_high_n_s32(__p0_248, __p1_248, __p2_248) __extension__ ({ \
  60464. int16x4_t __s0_248 = __p0_248; \
  60465. int32x4_t __s1_248 = __p1_248; \
  60466. int16x8_t __ret_248; \
  60467. __ret_248 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_248), (int16x4_t)(vshrn_n_s32(__s1_248, __p2_248)))); \
  60468. __ret_248; \
  60469. })
  60470. #else
  60471. #define vshrn_high_n_s32(__p0_249, __p1_249, __p2_249) __extension__ ({ \
  60472. int16x4_t __s0_249 = __p0_249; \
  60473. int32x4_t __s1_249 = __p1_249; \
  60474. int16x4_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 3, 2, 1, 0); \
  60475. int32x4_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 3, 2, 1, 0); \
  60476. int16x8_t __ret_249; \
  60477. __ret_249 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_249), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_249, __p2_249)))); \
  60478. __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 7, 6, 5, 4, 3, 2, 1, 0); \
  60479. __ret_249; \
  60480. })
  60481. #endif
  60482. #ifdef __LITTLE_ENDIAN__
  60483. #define vshrn_high_n_s64(__p0_250, __p1_250, __p2_250) __extension__ ({ \
  60484. int32x2_t __s0_250 = __p0_250; \
  60485. int64x2_t __s1_250 = __p1_250; \
  60486. int32x4_t __ret_250; \
  60487. __ret_250 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_250), (int32x2_t)(vshrn_n_s64(__s1_250, __p2_250)))); \
  60488. __ret_250; \
  60489. })
  60490. #else
  60491. #define vshrn_high_n_s64(__p0_251, __p1_251, __p2_251) __extension__ ({ \
  60492. int32x2_t __s0_251 = __p0_251; \
  60493. int64x2_t __s1_251 = __p1_251; \
  60494. int32x2_t __rev0_251; __rev0_251 = __builtin_shufflevector(__s0_251, __s0_251, 1, 0); \
  60495. int64x2_t __rev1_251; __rev1_251 = __builtin_shufflevector(__s1_251, __s1_251, 1, 0); \
  60496. int32x4_t __ret_251; \
  60497. __ret_251 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_251), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_251, __p2_251)))); \
  60498. __ret_251 = __builtin_shufflevector(__ret_251, __ret_251, 3, 2, 1, 0); \
  60499. __ret_251; \
  60500. })
  60501. #endif
  60502. #ifdef __LITTLE_ENDIAN__
  60503. #define vshrn_high_n_s16(__p0_252, __p1_252, __p2_252) __extension__ ({ \
  60504. int8x8_t __s0_252 = __p0_252; \
  60505. int16x8_t __s1_252 = __p1_252; \
  60506. int8x16_t __ret_252; \
  60507. __ret_252 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_252), (int8x8_t)(vshrn_n_s16(__s1_252, __p2_252)))); \
  60508. __ret_252; \
  60509. })
  60510. #else
  60511. #define vshrn_high_n_s16(__p0_253, __p1_253, __p2_253) __extension__ ({ \
  60512. int8x8_t __s0_253 = __p0_253; \
  60513. int16x8_t __s1_253 = __p1_253; \
  60514. int8x8_t __rev0_253; __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 7, 6, 5, 4, 3, 2, 1, 0); \
  60515. int16x8_t __rev1_253; __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 7, 6, 5, 4, 3, 2, 1, 0); \
  60516. int8x16_t __ret_253; \
  60517. __ret_253 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_253), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_253, __p2_253)))); \
  60518. __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  60519. __ret_253; \
  60520. })
  60521. #endif
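/* vslid_n / vsli_n / vsliq_n: shift left and insert; the second operand is
   shifted left by the immediate and inserted into the first, whose bits below
   the shift amount are preserved in the result. */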
  60522. #ifdef __LITTLE_ENDIAN__
  60523. #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
  60524. uint64_t __s0 = __p0; \
  60525. uint64_t __s1 = __p1; \
  60526. uint64_t __ret; \
  60527. __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
  60528. __ret; \
  60529. })
  60530. #else
  60531. #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
  60532. uint64_t __s0 = __p0; \
  60533. uint64_t __s1 = __p1; \
  60534. uint64_t __ret; \
  60535. __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
  60536. __ret; \
  60537. })
  60538. #endif
  60539. #ifdef __LITTLE_ENDIAN__
  60540. #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
  60541. int64_t __s0 = __p0; \
  60542. int64_t __s1 = __p1; \
  60543. int64_t __ret; \
  60544. __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
  60545. __ret; \
  60546. })
  60547. #else
  60548. #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
  60549. int64_t __s0 = __p0; \
  60550. int64_t __s1 = __p1; \
  60551. int64_t __ret; \
  60552. __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
  60553. __ret; \
  60554. })
  60555. #endif
  60556. #ifdef __LITTLE_ENDIAN__
  60557. #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
  60558. poly64x1_t __s0 = __p0; \
  60559. poly64x1_t __s1 = __p1; \
  60560. poly64x1_t __ret; \
  60561. __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
  60562. __ret; \
  60563. })
  60564. #else
  60565. #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
  60566. poly64x1_t __s0 = __p0; \
  60567. poly64x1_t __s1 = __p1; \
  60568. poly64x1_t __ret; \
  60569. __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
  60570. __ret; \
  60571. })
  60572. #endif
  60573. #ifdef __LITTLE_ENDIAN__
  60574. #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
  60575. poly64x2_t __s0 = __p0; \
  60576. poly64x2_t __s1 = __p1; \
  60577. poly64x2_t __ret; \
  60578. __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
  60579. __ret; \
  60580. })
  60581. #else
  60582. #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
  60583. poly64x2_t __s0 = __p0; \
  60584. poly64x2_t __s1 = __p1; \
  60585. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  60586. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  60587. poly64x2_t __ret; \
  60588. __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
  60589. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  60590. __ret; \
  60591. })
  60592. #endif
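/* vsqadd*: unsigned saturating accumulate (USQADD); the second operand is added
   to the first with saturation to the unsigned range (architecturally the
   addend is treated as signed). */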
  60593. #ifdef __LITTLE_ENDIAN__
  60594. __ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
  60595. uint8_t __ret;
  60596. __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
  60597. return __ret;
  60598. }
  60599. #else
  60600. __ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
  60601. uint8_t __ret;
  60602. __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
  60603. return __ret;
  60604. }
  60605. #endif
  60606. #ifdef __LITTLE_ENDIAN__
  60607. __ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
  60608. uint32_t __ret;
  60609. __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
  60610. return __ret;
  60611. }
  60612. #else
  60613. __ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
  60614. uint32_t __ret;
  60615. __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
  60616. return __ret;
  60617. }
  60618. #endif
  60619. #ifdef __LITTLE_ENDIAN__
  60620. __ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
  60621. uint64_t __ret;
  60622. __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
  60623. return __ret;
  60624. }
  60625. #else
  60626. __ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
  60627. uint64_t __ret;
  60628. __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
  60629. return __ret;
  60630. }
  60631. #endif
  60632. #ifdef __LITTLE_ENDIAN__
  60633. __ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
  60634. uint16_t __ret;
  60635. __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
  60636. return __ret;
  60637. }
  60638. #else
  60639. __ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
  60640. uint16_t __ret;
  60641. __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
  60642. return __ret;
  60643. }
  60644. #endif
  60645. #ifdef __LITTLE_ENDIAN__
  60646. __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  60647. uint8x16_t __ret;
  60648. __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
  60649. return __ret;
  60650. }
  60651. #else
  60652. __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
  60653. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  60654. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  60655. uint8x16_t __ret;
  60656. __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
  60657. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  60658. return __ret;
  60659. }
  60660. #endif
  60661. #ifdef __LITTLE_ENDIAN__
  60662. __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  60663. uint32x4_t __ret;
  60664. __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
  60665. return __ret;
  60666. }
  60667. #else
  60668. __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
  60669. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  60670. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  60671. uint32x4_t __ret;
  60672. __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
  60673. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  60674. return __ret;
  60675. }
  60676. #endif
  60677. #ifdef __LITTLE_ENDIAN__
  60678. __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  60679. uint64x2_t __ret;
  60680. __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  60681. return __ret;
  60682. }
  60683. #else
  60684. __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  60685. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  60686. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  60687. uint64x2_t __ret;
  60688. __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  60689. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  60690. return __ret;
  60691. }
  60692. #endif
  60693. #ifdef __LITTLE_ENDIAN__
  60694. __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  60695. uint16x8_t __ret;
  60696. __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
  60697. return __ret;
  60698. }
  60699. #else
  60700. __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
  60701. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  60702. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  60703. uint16x8_t __ret;
  60704. __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
  60705. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  60706. return __ret;
  60707. }
  60708. #endif
  60709. #ifdef __LITTLE_ENDIAN__
  60710. __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  60711. uint8x8_t __ret;
  60712. __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
  60713. return __ret;
  60714. }
  60715. #else
  60716. __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
  60717. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  60718. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  60719. uint8x8_t __ret;
  60720. __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
  60721. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  60722. return __ret;
  60723. }
  60724. #endif
  60725. #ifdef __LITTLE_ENDIAN__
  60726. __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  60727. uint32x2_t __ret;
  60728. __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
  60729. return __ret;
  60730. }
  60731. #else
  60732. __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
  60733. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  60734. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  60735. uint32x2_t __ret;
  60736. __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
  60737. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  60738. return __ret;
  60739. }
  60740. #endif
  60741. #ifdef __LITTLE_ENDIAN__
  60742. __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
  60743. uint64x1_t __ret;
  60744. __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  60745. return __ret;
  60746. }
  60747. #else
  60748. __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
  60749. uint64x1_t __ret;
  60750. __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  60751. return __ret;
  60752. }
  60753. #endif
  60754. #ifdef __LITTLE_ENDIAN__
  60755. __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  60756. uint16x4_t __ret;
  60757. __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
  60758. return __ret;
  60759. }
  60760. #else
  60761. __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
  60762. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  60763. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  60764. uint16x4_t __ret;
  60765. __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
  60766. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  60767. return __ret;
  60768. }
  60769. #endif
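/*
 * The vsqadd* family above is a saturating accumulate into an unsigned
 * operand (it typically lowers to AArch64 USQADD): the sum saturates at the
 * element type's limits instead of wrapping.  Illustrative sketch (assumes an
 * AArch64 target; values are examples only):
 *
 *   uint8x16_t acc = vdupq_n_u8(250);
 *   uint8x16_t adj = vdupq_n_u8(10);
 *   uint8x16_t r   = vsqaddq_u8(acc, adj);     each lane is 255, not 4
 */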
  60770. #ifdef __LITTLE_ENDIAN__
  60771. __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
  60772. float64x2_t __ret;
  60773. __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
  60774. return __ret;
  60775. }
  60776. #else
  60777. __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
  60778. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  60779. float64x2_t __ret;
  60780. __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
  60781. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  60782. return __ret;
  60783. }
  60784. #endif
  60785. #ifdef __LITTLE_ENDIAN__
  60786. __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
  60787. float32x4_t __ret;
  60788. __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
  60789. return __ret;
  60790. }
  60791. #else
  60792. __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
  60793. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  60794. float32x4_t __ret;
  60795. __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
  60796. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  60797. return __ret;
  60798. }
  60799. #endif
  60800. #ifdef __LITTLE_ENDIAN__
  60801. __ai float64x1_t vsqrt_f64(float64x1_t __p0) {
  60802. float64x1_t __ret;
  60803. __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
  60804. return __ret;
  60805. }
  60806. #else
  60807. __ai float64x1_t vsqrt_f64(float64x1_t __p0) {
  60808. float64x1_t __ret;
  60809. __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
  60810. return __ret;
  60811. }
  60812. #endif
  60813. #ifdef __LITTLE_ENDIAN__
  60814. __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
  60815. float32x2_t __ret;
  60816. __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
  60817. return __ret;
  60818. }
  60819. #else
  60820. __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
  60821. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  60822. float32x2_t __ret;
  60823. __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
  60824. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  60825. return __ret;
  60826. }
  60827. #endif
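/*
 * vsqrt/vsqrtq above compute a per-lane floating-point square root (AArch64
 * FSQRT).  Illustrative sketch (values are examples only):
 *
 *   float64x2_t x = vdupq_n_f64(2.0);
 *   float64x2_t r = vsqrtq_f64(x);             both lanes hold sqrt(2.0)
 */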
  60828. #ifdef __LITTLE_ENDIAN__
  60829. #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
  60830. uint64_t __s0 = __p0; \
  60831. uint64_t __s1 = __p1; \
  60832. uint64_t __ret; \
  60833. __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
  60834. __ret; \
  60835. })
  60836. #else
  60837. #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
  60838. uint64_t __s0 = __p0; \
  60839. uint64_t __s1 = __p1; \
  60840. uint64_t __ret; \
  60841. __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
  60842. __ret; \
  60843. })
  60844. #endif
  60845. #ifdef __LITTLE_ENDIAN__
  60846. #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
  60847. int64_t __s0 = __p0; \
  60848. int64_t __s1 = __p1; \
  60849. int64_t __ret; \
  60850. __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
  60851. __ret; \
  60852. })
  60853. #else
  60854. #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
  60855. int64_t __s0 = __p0; \
  60856. int64_t __s1 = __p1; \
  60857. int64_t __ret; \
  60858. __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
  60859. __ret; \
  60860. })
  60861. #endif
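/*
 * vsrad_n_u64/vsrad_n_s64 above are scalar "shift right and accumulate"
 * forms (USRA/SSRA): the second operand is shifted right by __p2 and added to
 * the first.  Illustrative sketch (values are examples only):
 *
 *   uint64_t r = vsrad_n_u64(100, 256, 4);     r == 100 + (256 >> 4) == 116
 */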
  60862. #ifdef __LITTLE_ENDIAN__
  60863. #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
  60864. uint64_t __s0 = __p0; \
  60865. uint64_t __s1 = __p1; \
  60866. uint64_t __ret; \
  60867. __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
  60868. __ret; \
  60869. })
  60870. #else
  60871. #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
  60872. uint64_t __s0 = __p0; \
  60873. uint64_t __s1 = __p1; \
  60874. uint64_t __ret; \
  60875. __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
  60876. __ret; \
  60877. })
  60878. #endif
  60879. #ifdef __LITTLE_ENDIAN__
  60880. #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
  60881. int64_t __s0 = __p0; \
  60882. int64_t __s1 = __p1; \
  60883. int64_t __ret; \
  60884. __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
  60885. __ret; \
  60886. })
  60887. #else
  60888. #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
  60889. int64_t __s0 = __p0; \
  60890. int64_t __s1 = __p1; \
  60891. int64_t __ret; \
  60892. __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
  60893. __ret; \
  60894. })
  60895. #endif
  60896. #ifdef __LITTLE_ENDIAN__
  60897. #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
  60898. poly64x1_t __s0 = __p0; \
  60899. poly64x1_t __s1 = __p1; \
  60900. poly64x1_t __ret; \
  60901. __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
  60902. __ret; \
  60903. })
  60904. #else
  60905. #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
  60906. poly64x1_t __s0 = __p0; \
  60907. poly64x1_t __s1 = __p1; \
  60908. poly64x1_t __ret; \
  60909. __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
  60910. __ret; \
  60911. })
  60912. #endif
  60913. #ifdef __LITTLE_ENDIAN__
  60914. #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
  60915. poly64x2_t __s0 = __p0; \
  60916. poly64x2_t __s1 = __p1; \
  60917. poly64x2_t __ret; \
  60918. __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
  60919. __ret; \
  60920. })
  60921. #else
  60922. #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
  60923. poly64x2_t __s0 = __p0; \
  60924. poly64x2_t __s1 = __p1; \
  60925. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  60926. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  60927. poly64x2_t __ret; \
  60928. __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
  60929. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  60930. __ret; \
  60931. })
  60932. #endif
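/*
 * vsrid_n_* and vsri*_n_p64 above are "shift right and insert" (SRI): each
 * result lane keeps the top __p2 bits of the first operand and takes the
 * second operand shifted right by __p2 below them.  Illustrative sketch
 * (values are examples only):
 *
 *   uint64_t r = vsrid_n_u64(~0ull, 0, 16);    r == 0xFFFF000000000000
 */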
  60933. #ifdef __LITTLE_ENDIAN__
  60934. #define vst1_p64(__p0, __p1) __extension__ ({ \
  60935. poly64x1_t __s1 = __p1; \
  60936. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
  60937. })
  60938. #else
  60939. #define vst1_p64(__p0, __p1) __extension__ ({ \
  60940. poly64x1_t __s1 = __p1; \
  60941. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
  60942. })
  60943. #endif
  60944. #ifdef __LITTLE_ENDIAN__
  60945. #define vst1q_p64(__p0, __p1) __extension__ ({ \
  60946. poly64x2_t __s1 = __p1; \
  60947. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
  60948. })
  60949. #else
  60950. #define vst1q_p64(__p0, __p1) __extension__ ({ \
  60951. poly64x2_t __s1 = __p1; \
  60952. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  60953. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
  60954. })
  60955. #endif
  60956. #ifdef __LITTLE_ENDIAN__
  60957. #define vst1q_f64(__p0, __p1) __extension__ ({ \
  60958. float64x2_t __s1 = __p1; \
  60959. __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
  60960. })
  60961. #else
  60962. #define vst1q_f64(__p0, __p1) __extension__ ({ \
  60963. float64x2_t __s1 = __p1; \
  60964. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  60965. __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
  60966. })
  60967. #endif
  60968. #ifdef __LITTLE_ENDIAN__
  60969. #define vst1_f64(__p0, __p1) __extension__ ({ \
  60970. float64x1_t __s1 = __p1; \
  60971. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
  60972. })
  60973. #else
  60974. #define vst1_f64(__p0, __p1) __extension__ ({ \
  60975. float64x1_t __s1 = __p1; \
  60976. __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
  60977. })
  60978. #endif
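/*
 * These vst1/vst1q overloads store one whole vector of the AArch64-only
 * element types (poly64, float64) to the memory addressed by __p0.
 * Illustrative sketch (buffer name is an example only):
 *
 *   float64_t buf[2];
 *   vst1q_f64(buf, vdupq_n_f64(3.0));          buf[0] == buf[1] == 3.0
 */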
  60979. #ifdef __LITTLE_ENDIAN__
  60980. #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60981. poly64x1_t __s1 = __p1; \
  60982. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
  60983. })
  60984. #else
  60985. #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60986. poly64x1_t __s1 = __p1; \
  60987. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
  60988. })
  60989. #endif
  60990. #ifdef __LITTLE_ENDIAN__
  60991. #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60992. poly64x2_t __s1 = __p1; \
  60993. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
  60994. })
  60995. #else
  60996. #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  60997. poly64x2_t __s1 = __p1; \
  60998. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  60999. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
  61000. })
  61001. #endif
  61002. #ifdef __LITTLE_ENDIAN__
  61003. #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  61004. float64x2_t __s1 = __p1; \
  61005. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
  61006. })
  61007. #else
  61008. #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  61009. float64x2_t __s1 = __p1; \
  61010. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  61011. __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
  61012. })
  61013. #endif
  61014. #ifdef __LITTLE_ENDIAN__
  61015. #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  61016. float64x1_t __s1 = __p1; \
  61017. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
  61018. })
  61019. #else
  61020. #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  61021. float64x1_t __s1 = __p1; \
  61022. __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
  61023. })
  61024. #endif
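/*
 * The *_lane variants above store a single selected lane rather than the
 * whole vector.  Illustrative sketch (names and values are examples only):
 *
 *   float64_t one;
 *   vst1q_lane_f64(&one, vdupq_n_f64(7.0), 1); one == 7.0
 */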
  61025. #ifdef __LITTLE_ENDIAN__
  61026. #define vst1_p8_x2(__p0, __p1) __extension__ ({ \
  61027. poly8x8x2_t __s1 = __p1; \
  61028. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
  61029. })
  61030. #else
  61031. #define vst1_p8_x2(__p0, __p1) __extension__ ({ \
  61032. poly8x8x2_t __s1 = __p1; \
  61033. poly8x8x2_t __rev1; \
  61034. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61035. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61036. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
  61037. })
  61038. #endif
  61039. #ifdef __LITTLE_ENDIAN__
  61040. #define vst1_p64_x2(__p0, __p1) __extension__ ({ \
  61041. poly64x1x2_t __s1 = __p1; \
  61042. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
  61043. })
  61044. #else
  61045. #define vst1_p64_x2(__p0, __p1) __extension__ ({ \
  61046. poly64x1x2_t __s1 = __p1; \
  61047. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
  61048. })
  61049. #endif
  61050. #ifdef __LITTLE_ENDIAN__
  61051. #define vst1_p16_x2(__p0, __p1) __extension__ ({ \
  61052. poly16x4x2_t __s1 = __p1; \
  61053. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
  61054. })
  61055. #else
  61056. #define vst1_p16_x2(__p0, __p1) __extension__ ({ \
  61057. poly16x4x2_t __s1 = __p1; \
  61058. poly16x4x2_t __rev1; \
  61059. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61060. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61061. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
  61062. })
  61063. #endif
  61064. #ifdef __LITTLE_ENDIAN__
  61065. #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
  61066. poly8x16x2_t __s1 = __p1; \
  61067. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
  61068. })
  61069. #else
  61070. #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
  61071. poly8x16x2_t __s1 = __p1; \
  61072. poly8x16x2_t __rev1; \
  61073. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61074. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61075. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
  61076. })
  61077. #endif
  61078. #ifdef __LITTLE_ENDIAN__
  61079. #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
  61080. poly64x2x2_t __s1 = __p1; \
  61081. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
  61082. })
  61083. #else
  61084. #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
  61085. poly64x2x2_t __s1 = __p1; \
  61086. poly64x2x2_t __rev1; \
  61087. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61088. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61089. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
  61090. })
  61091. #endif
  61092. #ifdef __LITTLE_ENDIAN__
  61093. #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
  61094. poly16x8x2_t __s1 = __p1; \
  61095. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
  61096. })
  61097. #else
  61098. #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
  61099. poly16x8x2_t __s1 = __p1; \
  61100. poly16x8x2_t __rev1; \
  61101. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61102. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61103. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
  61104. })
  61105. #endif
  61106. #ifdef __LITTLE_ENDIAN__
  61107. #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
  61108. uint8x16x2_t __s1 = __p1; \
  61109. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
  61110. })
  61111. #else
  61112. #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
  61113. uint8x16x2_t __s1 = __p1; \
  61114. uint8x16x2_t __rev1; \
  61115. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61116. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61117. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
  61118. })
  61119. #endif
  61120. #ifdef __LITTLE_ENDIAN__
  61121. #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
  61122. uint32x4x2_t __s1 = __p1; \
  61123. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
  61124. })
  61125. #else
  61126. #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
  61127. uint32x4x2_t __s1 = __p1; \
  61128. uint32x4x2_t __rev1; \
  61129. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61130. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61131. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
  61132. })
  61133. #endif
  61134. #ifdef __LITTLE_ENDIAN__
  61135. #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
  61136. uint64x2x2_t __s1 = __p1; \
  61137. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
  61138. })
  61139. #else
  61140. #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
  61141. uint64x2x2_t __s1 = __p1; \
  61142. uint64x2x2_t __rev1; \
  61143. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61144. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61145. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
  61146. })
  61147. #endif
  61148. #ifdef __LITTLE_ENDIAN__
  61149. #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
  61150. uint16x8x2_t __s1 = __p1; \
  61151. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
  61152. })
  61153. #else
  61154. #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
  61155. uint16x8x2_t __s1 = __p1; \
  61156. uint16x8x2_t __rev1; \
  61157. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61158. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61159. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
  61160. })
  61161. #endif
  61162. #ifdef __LITTLE_ENDIAN__
  61163. #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
  61164. int8x16x2_t __s1 = __p1; \
  61165. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
  61166. })
  61167. #else
  61168. #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
  61169. int8x16x2_t __s1 = __p1; \
  61170. int8x16x2_t __rev1; \
  61171. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61172. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61173. __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
  61174. })
  61175. #endif
  61176. #ifdef __LITTLE_ENDIAN__
  61177. #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
  61178. float64x2x2_t __s1 = __p1; \
  61179. __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \
  61180. })
  61181. #else
  61182. #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
  61183. float64x2x2_t __s1 = __p1; \
  61184. float64x2x2_t __rev1; \
  61185. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61186. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61187. __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
  61188. })
  61189. #endif
  61190. #ifdef __LITTLE_ENDIAN__
  61191. #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
  61192. float32x4x2_t __s1 = __p1; \
  61193. __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \
  61194. })
  61195. #else
  61196. #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
  61197. float32x4x2_t __s1 = __p1; \
  61198. float32x4x2_t __rev1; \
  61199. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61200. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61201. __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
  61202. })
  61203. #endif
  61204. #ifdef __LITTLE_ENDIAN__
  61205. #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
  61206. float16x8x2_t __s1 = __p1; \
  61207. __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \
  61208. })
  61209. #else
  61210. #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
  61211. float16x8x2_t __s1 = __p1; \
  61212. float16x8x2_t __rev1; \
  61213. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61214. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61215. __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
  61216. })
  61217. #endif
  61218. #ifdef __LITTLE_ENDIAN__
  61219. #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
  61220. int32x4x2_t __s1 = __p1; \
  61221. __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \
  61222. })
  61223. #else
  61224. #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
  61225. int32x4x2_t __s1 = __p1; \
  61226. int32x4x2_t __rev1; \
  61227. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61228. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61229. __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
  61230. })
  61231. #endif
  61232. #ifdef __LITTLE_ENDIAN__
  61233. #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
  61234. int64x2x2_t __s1 = __p1; \
  61235. __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \
  61236. })
  61237. #else
  61238. #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
  61239. int64x2x2_t __s1 = __p1; \
  61240. int64x2x2_t __rev1; \
  61241. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61242. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61243. __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
  61244. })
  61245. #endif
  61246. #ifdef __LITTLE_ENDIAN__
  61247. #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
  61248. int16x8x2_t __s1 = __p1; \
  61249. __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \
  61250. })
  61251. #else
  61252. #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
  61253. int16x8x2_t __s1 = __p1; \
  61254. int16x8x2_t __rev1; \
  61255. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61256. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61257. __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
  61258. })
  61259. #endif
  61260. #ifdef __LITTLE_ENDIAN__
  61261. #define vst1_u8_x2(__p0, __p1) __extension__ ({ \
  61262. uint8x8x2_t __s1 = __p1; \
  61263. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
  61264. })
  61265. #else
  61266. #define vst1_u8_x2(__p0, __p1) __extension__ ({ \
  61267. uint8x8x2_t __s1 = __p1; \
  61268. uint8x8x2_t __rev1; \
  61269. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61270. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61271. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
  61272. })
  61273. #endif
  61274. #ifdef __LITTLE_ENDIAN__
  61275. #define vst1_u32_x2(__p0, __p1) __extension__ ({ \
  61276. uint32x2x2_t __s1 = __p1; \
  61277. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
  61278. })
  61279. #else
  61280. #define vst1_u32_x2(__p0, __p1) __extension__ ({ \
  61281. uint32x2x2_t __s1 = __p1; \
  61282. uint32x2x2_t __rev1; \
  61283. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61284. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61285. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
  61286. })
  61287. #endif
  61288. #ifdef __LITTLE_ENDIAN__
  61289. #define vst1_u64_x2(__p0, __p1) __extension__ ({ \
  61290. uint64x1x2_t __s1 = __p1; \
  61291. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
  61292. })
  61293. #else
  61294. #define vst1_u64_x2(__p0, __p1) __extension__ ({ \
  61295. uint64x1x2_t __s1 = __p1; \
  61296. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
  61297. })
  61298. #endif
  61299. #ifdef __LITTLE_ENDIAN__
  61300. #define vst1_u16_x2(__p0, __p1) __extension__ ({ \
  61301. uint16x4x2_t __s1 = __p1; \
  61302. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
  61303. })
  61304. #else
  61305. #define vst1_u16_x2(__p0, __p1) __extension__ ({ \
  61306. uint16x4x2_t __s1 = __p1; \
  61307. uint16x4x2_t __rev1; \
  61308. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61309. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61310. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
  61311. })
  61312. #endif
  61313. #ifdef __LITTLE_ENDIAN__
  61314. #define vst1_s8_x2(__p0, __p1) __extension__ ({ \
  61315. int8x8x2_t __s1 = __p1; \
  61316. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
  61317. })
  61318. #else
  61319. #define vst1_s8_x2(__p0, __p1) __extension__ ({ \
  61320. int8x8x2_t __s1 = __p1; \
  61321. int8x8x2_t __rev1; \
  61322. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61323. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61324. __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
  61325. })
  61326. #endif
  61327. #ifdef __LITTLE_ENDIAN__
  61328. #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
  61329. float64x1x2_t __s1 = __p1; \
  61330. __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
  61331. })
  61332. #else
  61333. #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
  61334. float64x1x2_t __s1 = __p1; \
  61335. __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
  61336. })
  61337. #endif
  61338. #ifdef __LITTLE_ENDIAN__
  61339. #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
  61340. float32x2x2_t __s1 = __p1; \
  61341. __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \
  61342. })
  61343. #else
  61344. #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
  61345. float32x2x2_t __s1 = __p1; \
  61346. float32x2x2_t __rev1; \
  61347. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61348. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61349. __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
  61350. })
  61351. #endif
  61352. #ifdef __LITTLE_ENDIAN__
  61353. #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
  61354. float16x4x2_t __s1 = __p1; \
  61355. __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \
  61356. })
  61357. #else
  61358. #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
  61359. float16x4x2_t __s1 = __p1; \
  61360. float16x4x2_t __rev1; \
  61361. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61362. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61363. __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
  61364. })
  61365. #endif
  61366. #ifdef __LITTLE_ENDIAN__
  61367. #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
  61368. int32x2x2_t __s1 = __p1; \
  61369. __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \
  61370. })
  61371. #else
  61372. #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
  61373. int32x2x2_t __s1 = __p1; \
  61374. int32x2x2_t __rev1; \
  61375. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61376. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61377. __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
  61378. })
  61379. #endif
  61380. #ifdef __LITTLE_ENDIAN__
  61381. #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
  61382. int64x1x2_t __s1 = __p1; \
  61383. __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
  61384. })
  61385. #else
  61386. #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
  61387. int64x1x2_t __s1 = __p1; \
  61388. __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
  61389. })
  61390. #endif
  61391. #ifdef __LITTLE_ENDIAN__
  61392. #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
  61393. int16x4x2_t __s1 = __p1; \
  61394. __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \
  61395. })
  61396. #else
  61397. #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
  61398. int16x4x2_t __s1 = __p1; \
  61399. int16x4x2_t __rev1; \
  61400. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61401. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61402. __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
  61403. })
  61404. #endif
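/*
 * The _x2 forms above store two vectors of the same type to consecutive
 * memory, val[0] first and then val[1].  Illustrative sketch (names and
 * values are examples only):
 *
 *   int32_t buf[8];
 *   int32x4x2_t pair;
 *   pair.val[0] = vdupq_n_s32(1);
 *   pair.val[1] = vdupq_n_s32(2);
 *   vst1q_s32_x2(buf, pair);                   buf == {1,1,1,1,2,2,2,2}
 */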
  61405. #ifdef __LITTLE_ENDIAN__
  61406. #define vst1_p8_x3(__p0, __p1) __extension__ ({ \
  61407. poly8x8x3_t __s1 = __p1; \
  61408. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
  61409. })
  61410. #else
  61411. #define vst1_p8_x3(__p0, __p1) __extension__ ({ \
  61412. poly8x8x3_t __s1 = __p1; \
  61413. poly8x8x3_t __rev1; \
  61414. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61415. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61416. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61417. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
  61418. })
  61419. #endif
  61420. #ifdef __LITTLE_ENDIAN__
  61421. #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
  61422. poly64x1x3_t __s1 = __p1; \
  61423. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
  61424. })
  61425. #else
  61426. #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
  61427. poly64x1x3_t __s1 = __p1; \
  61428. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
  61429. })
  61430. #endif
  61431. #ifdef __LITTLE_ENDIAN__
  61432. #define vst1_p16_x3(__p0, __p1) __extension__ ({ \
  61433. poly16x4x3_t __s1 = __p1; \
  61434. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
  61435. })
  61436. #else
  61437. #define vst1_p16_x3(__p0, __p1) __extension__ ({ \
  61438. poly16x4x3_t __s1 = __p1; \
  61439. poly16x4x3_t __rev1; \
  61440. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61441. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61442. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61443. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
  61444. })
  61445. #endif
  61446. #ifdef __LITTLE_ENDIAN__
  61447. #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
  61448. poly8x16x3_t __s1 = __p1; \
  61449. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
  61450. })
  61451. #else
  61452. #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
  61453. poly8x16x3_t __s1 = __p1; \
  61454. poly8x16x3_t __rev1; \
  61455. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61456. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61457. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61458. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
  61459. })
  61460. #endif
  61461. #ifdef __LITTLE_ENDIAN__
  61462. #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
  61463. poly64x2x3_t __s1 = __p1; \
  61464. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
  61465. })
  61466. #else
  61467. #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
  61468. poly64x2x3_t __s1 = __p1; \
  61469. poly64x2x3_t __rev1; \
  61470. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61471. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61472. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61473. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
  61474. })
  61475. #endif
  61476. #ifdef __LITTLE_ENDIAN__
  61477. #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
  61478. poly16x8x3_t __s1 = __p1; \
  61479. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
  61480. })
  61481. #else
  61482. #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
  61483. poly16x8x3_t __s1 = __p1; \
  61484. poly16x8x3_t __rev1; \
  61485. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61486. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61487. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61488. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
  61489. })
  61490. #endif
  61491. #ifdef __LITTLE_ENDIAN__
  61492. #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
  61493. uint8x16x3_t __s1 = __p1; \
  61494. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
  61495. })
  61496. #else
  61497. #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
  61498. uint8x16x3_t __s1 = __p1; \
  61499. uint8x16x3_t __rev1; \
  61500. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61501. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61502. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61503. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
  61504. })
  61505. #endif
  61506. #ifdef __LITTLE_ENDIAN__
  61507. #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
  61508. uint32x4x3_t __s1 = __p1; \
  61509. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
  61510. })
  61511. #else
  61512. #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
  61513. uint32x4x3_t __s1 = __p1; \
  61514. uint32x4x3_t __rev1; \
  61515. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61516. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61517. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61518. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
  61519. })
  61520. #endif
  61521. #ifdef __LITTLE_ENDIAN__
  61522. #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
  61523. uint64x2x3_t __s1 = __p1; \
  61524. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
  61525. })
  61526. #else
  61527. #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
  61528. uint64x2x3_t __s1 = __p1; \
  61529. uint64x2x3_t __rev1; \
  61530. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61531. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61532. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61533. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
  61534. })
  61535. #endif
  61536. #ifdef __LITTLE_ENDIAN__
  61537. #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
  61538. uint16x8x3_t __s1 = __p1; \
  61539. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
  61540. })
  61541. #else
  61542. #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
  61543. uint16x8x3_t __s1 = __p1; \
  61544. uint16x8x3_t __rev1; \
  61545. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61546. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61547. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61548. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
  61549. })
  61550. #endif
  61551. #ifdef __LITTLE_ENDIAN__
  61552. #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
  61553. int8x16x3_t __s1 = __p1; \
  61554. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
  61555. })
  61556. #else
  61557. #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
  61558. int8x16x3_t __s1 = __p1; \
  61559. int8x16x3_t __rev1; \
  61560. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61561. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61562. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61563. __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
  61564. })
  61565. #endif
  61566. #ifdef __LITTLE_ENDIAN__
  61567. #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
  61568. float64x2x3_t __s1 = __p1; \
  61569. __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
  61570. })
  61571. #else
  61572. #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
  61573. float64x2x3_t __s1 = __p1; \
  61574. float64x2x3_t __rev1; \
  61575. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61576. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61577. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61578. __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
  61579. })
  61580. #endif
  61581. #ifdef __LITTLE_ENDIAN__
  61582. #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
  61583. float32x4x3_t __s1 = __p1; \
  61584. __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
  61585. })
  61586. #else
  61587. #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
  61588. float32x4x3_t __s1 = __p1; \
  61589. float32x4x3_t __rev1; \
  61590. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61591. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61592. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61593. __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
  61594. })
  61595. #endif
  61596. #ifdef __LITTLE_ENDIAN__
  61597. #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
  61598. float16x8x3_t __s1 = __p1; \
  61599. __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
  61600. })
  61601. #else
  61602. #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
  61603. float16x8x3_t __s1 = __p1; \
  61604. float16x8x3_t __rev1; \
  61605. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61606. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61607. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61608. __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
  61609. })
  61610. #endif
  61611. #ifdef __LITTLE_ENDIAN__
  61612. #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
  61613. int32x4x3_t __s1 = __p1; \
  61614. __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
  61615. })
  61616. #else
  61617. #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
  61618. int32x4x3_t __s1 = __p1; \
  61619. int32x4x3_t __rev1; \
  61620. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61621. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61622. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61623. __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
  61624. })
  61625. #endif
  61626. #ifdef __LITTLE_ENDIAN__
  61627. #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
  61628. int64x2x3_t __s1 = __p1; \
  61629. __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
  61630. })
  61631. #else
  61632. #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
  61633. int64x2x3_t __s1 = __p1; \
  61634. int64x2x3_t __rev1; \
  61635. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61636. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61637. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61638. __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
  61639. })
  61640. #endif
  61641. #ifdef __LITTLE_ENDIAN__
  61642. #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
  61643. int16x8x3_t __s1 = __p1; \
  61644. __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
  61645. })
  61646. #else
  61647. #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
  61648. int16x8x3_t __s1 = __p1; \
  61649. int16x8x3_t __rev1; \
  61650. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61651. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61652. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61653. __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
  61654. })
  61655. #endif
  61656. #ifdef __LITTLE_ENDIAN__
  61657. #define vst1_u8_x3(__p0, __p1) __extension__ ({ \
  61658. uint8x8x3_t __s1 = __p1; \
  61659. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
  61660. })
  61661. #else
  61662. #define vst1_u8_x3(__p0, __p1) __extension__ ({ \
  61663. uint8x8x3_t __s1 = __p1; \
  61664. uint8x8x3_t __rev1; \
  61665. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61666. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61667. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61668. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
  61669. })
  61670. #endif
  61671. #ifdef __LITTLE_ENDIAN__
  61672. #define vst1_u32_x3(__p0, __p1) __extension__ ({ \
  61673. uint32x2x3_t __s1 = __p1; \
  61674. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
  61675. })
  61676. #else
  61677. #define vst1_u32_x3(__p0, __p1) __extension__ ({ \
  61678. uint32x2x3_t __s1 = __p1; \
  61679. uint32x2x3_t __rev1; \
  61680. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61681. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61682. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61683. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
  61684. })
  61685. #endif
  61686. #ifdef __LITTLE_ENDIAN__
  61687. #define vst1_u64_x3(__p0, __p1) __extension__ ({ \
  61688. uint64x1x3_t __s1 = __p1; \
  61689. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
  61690. })
  61691. #else
  61692. #define vst1_u64_x3(__p0, __p1) __extension__ ({ \
  61693. uint64x1x3_t __s1 = __p1; \
  61694. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
  61695. })
  61696. #endif
  61697. #ifdef __LITTLE_ENDIAN__
  61698. #define vst1_u16_x3(__p0, __p1) __extension__ ({ \
  61699. uint16x4x3_t __s1 = __p1; \
  61700. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
  61701. })
  61702. #else
  61703. #define vst1_u16_x3(__p0, __p1) __extension__ ({ \
  61704. uint16x4x3_t __s1 = __p1; \
  61705. uint16x4x3_t __rev1; \
  61706. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61707. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61708. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61709. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
  61710. })
  61711. #endif
  61712. #ifdef __LITTLE_ENDIAN__
  61713. #define vst1_s8_x3(__p0, __p1) __extension__ ({ \
  61714. int8x8x3_t __s1 = __p1; \
  61715. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
  61716. })
  61717. #else
  61718. #define vst1_s8_x3(__p0, __p1) __extension__ ({ \
  61719. int8x8x3_t __s1 = __p1; \
  61720. int8x8x3_t __rev1; \
  61721. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61722. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61723. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61724. __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
  61725. })
  61726. #endif
  61727. #ifdef __LITTLE_ENDIAN__
  61728. #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
  61729. float64x1x3_t __s1 = __p1; \
  61730. __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
  61731. })
  61732. #else
  61733. #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
  61734. float64x1x3_t __s1 = __p1; \
  61735. __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
  61736. })
  61737. #endif
  61738. #ifdef __LITTLE_ENDIAN__
  61739. #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
  61740. float32x2x3_t __s1 = __p1; \
  61741. __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
  61742. })
  61743. #else
  61744. #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
  61745. float32x2x3_t __s1 = __p1; \
  61746. float32x2x3_t __rev1; \
  61747. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61748. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61749. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61750. __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
  61751. })
  61752. #endif
  61753. #ifdef __LITTLE_ENDIAN__
  61754. #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
  61755. float16x4x3_t __s1 = __p1; \
  61756. __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
  61757. })
  61758. #else
  61759. #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
  61760. float16x4x3_t __s1 = __p1; \
  61761. float16x4x3_t __rev1; \
  61762. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61763. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61764. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61765. __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
  61766. })
  61767. #endif
  61768. #ifdef __LITTLE_ENDIAN__
  61769. #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
  61770. int32x2x3_t __s1 = __p1; \
  61771. __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
  61772. })
  61773. #else
  61774. #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
  61775. int32x2x3_t __s1 = __p1; \
  61776. int32x2x3_t __rev1; \
  61777. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61778. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61779. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61780. __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
  61781. })
  61782. #endif
  61783. #ifdef __LITTLE_ENDIAN__
  61784. #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
  61785. int64x1x3_t __s1 = __p1; \
  61786. __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
  61787. })
  61788. #else
  61789. #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
  61790. int64x1x3_t __s1 = __p1; \
  61791. __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
  61792. })
  61793. #endif
  61794. #ifdef __LITTLE_ENDIAN__
  61795. #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
  61796. int16x4x3_t __s1 = __p1; \
  61797. __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
  61798. })
  61799. #else
  61800. #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
  61801. int16x4x3_t __s1 = __p1; \
  61802. int16x4x3_t __rev1; \
  61803. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61804. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61805. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61806. __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
  61807. })
  61808. #endif
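/*
 * The _x3 forms above are the three-vector counterpart of the _x2 stores,
 * writing val[0], val[1] and val[2] to consecutive memory.  Illustrative
 * sketch (names and values are examples only):
 *
 *   uint8_t buf[24];
 *   uint8x8x3_t t;
 *   t.val[0] = vdup_n_u8(1);
 *   t.val[1] = vdup_n_u8(2);
 *   t.val[2] = vdup_n_u8(3);
 *   vst1_u8_x3(buf, t);                        eight 1s, then eight 2s, then eight 3s
 */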
  61809. #ifdef __LITTLE_ENDIAN__
  61810. #define vst1_p8_x4(__p0, __p1) __extension__ ({ \
  61811. poly8x8x4_t __s1 = __p1; \
  61812. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
  61813. })
  61814. #else
  61815. #define vst1_p8_x4(__p0, __p1) __extension__ ({ \
  61816. poly8x8x4_t __s1 = __p1; \
  61817. poly8x8x4_t __rev1; \
  61818. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61819. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61820. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61821. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  61822. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
  61823. })
  61824. #endif
  61825. #ifdef __LITTLE_ENDIAN__
  61826. #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
  61827. poly64x1x4_t __s1 = __p1; \
  61828. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
  61829. })
  61830. #else
  61831. #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
  61832. poly64x1x4_t __s1 = __p1; \
  61833. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
  61834. })
  61835. #endif
  61836. #ifdef __LITTLE_ENDIAN__
  61837. #define vst1_p16_x4(__p0, __p1) __extension__ ({ \
  61838. poly16x4x4_t __s1 = __p1; \
  61839. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
  61840. })
  61841. #else
  61842. #define vst1_p16_x4(__p0, __p1) __extension__ ({ \
  61843. poly16x4x4_t __s1 = __p1; \
  61844. poly16x4x4_t __rev1; \
  61845. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61846. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61847. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61848. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  61849. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
  61850. })
  61851. #endif
  61852. #ifdef __LITTLE_ENDIAN__
  61853. #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
  61854. poly8x16x4_t __s1 = __p1; \
  61855. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
  61856. })
  61857. #else
  61858. #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
  61859. poly8x16x4_t __s1 = __p1; \
  61860. poly8x16x4_t __rev1; \
  61861. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61862. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61863. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61864. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61865. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
  61866. })
  61867. #endif
  61868. #ifdef __LITTLE_ENDIAN__
  61869. #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
  61870. poly64x2x4_t __s1 = __p1; \
  61871. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
  61872. })
  61873. #else
  61874. #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
  61875. poly64x2x4_t __s1 = __p1; \
  61876. poly64x2x4_t __rev1; \
  61877. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61878. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61879. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61880. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  61881. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
  61882. })
  61883. #endif
  61884. #ifdef __LITTLE_ENDIAN__
  61885. #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
  61886. poly16x8x4_t __s1 = __p1; \
  61887. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
  61888. })
  61889. #else
  61890. #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
  61891. poly16x8x4_t __s1 = __p1; \
  61892. poly16x8x4_t __rev1; \
  61893. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61894. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61895. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61896. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  61897. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
  61898. })
  61899. #endif
  61900. #ifdef __LITTLE_ENDIAN__
  61901. #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
  61902. uint8x16x4_t __s1 = __p1; \
  61903. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
  61904. })
  61905. #else
  61906. #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
  61907. uint8x16x4_t __s1 = __p1; \
  61908. uint8x16x4_t __rev1; \
  61909. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61910. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61911. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61912. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61913. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
  61914. })
  61915. #endif
  61916. #ifdef __LITTLE_ENDIAN__
  61917. #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
  61918. uint32x4x4_t __s1 = __p1; \
  61919. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
  61920. })
  61921. #else
  61922. #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
  61923. uint32x4x4_t __s1 = __p1; \
  61924. uint32x4x4_t __rev1; \
  61925. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  61926. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  61927. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  61928. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  61929. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
  61930. })
  61931. #endif
  61932. #ifdef __LITTLE_ENDIAN__
  61933. #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
  61934. uint64x2x4_t __s1 = __p1; \
  61935. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
  61936. })
  61937. #else
  61938. #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
  61939. uint64x2x4_t __s1 = __p1; \
  61940. uint64x2x4_t __rev1; \
  61941. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61942. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61943. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61944. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  61945. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
  61946. })
  61947. #endif
  61948. #ifdef __LITTLE_ENDIAN__
  61949. #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
  61950. uint16x8x4_t __s1 = __p1; \
  61951. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
  61952. })
  61953. #else
  61954. #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
  61955. uint16x8x4_t __s1 = __p1; \
  61956. uint16x8x4_t __rev1; \
  61957. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  61958. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  61959. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  61960. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  61961. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
  61962. })
  61963. #endif
  61964. #ifdef __LITTLE_ENDIAN__
  61965. #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
  61966. int8x16x4_t __s1 = __p1; \
  61967. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
  61968. })
  61969. #else
  61970. #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
  61971. int8x16x4_t __s1 = __p1; \
  61972. int8x16x4_t __rev1; \
  61973. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61974. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61975. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61976. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  61977. __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
  61978. })
  61979. #endif
  61980. #ifdef __LITTLE_ENDIAN__
  61981. #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
  61982. float64x2x4_t __s1 = __p1; \
  61983. __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
  61984. })
  61985. #else
  61986. #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
  61987. float64x2x4_t __s1 = __p1; \
  61988. float64x2x4_t __rev1; \
  61989. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  61990. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  61991. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  61992. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  61993. __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
  61994. })
  61995. #endif
  61996. #ifdef __LITTLE_ENDIAN__
  61997. #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
  61998. float32x4x4_t __s1 = __p1; \
  61999. __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
  62000. })
  62001. #else
  62002. #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
  62003. float32x4x4_t __s1 = __p1; \
  62004. float32x4x4_t __rev1; \
  62005. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  62006. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  62007. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  62008. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  62009. __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
  62010. })
  62011. #endif
  62012. #ifdef __LITTLE_ENDIAN__
  62013. #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
  62014. float16x8x4_t __s1 = __p1; \
  62015. __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
  62016. })
  62017. #else
  62018. #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
  62019. float16x8x4_t __s1 = __p1; \
  62020. float16x8x4_t __rev1; \
  62021. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  62022. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  62023. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  62024. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  62025. __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
  62026. })
  62027. #endif
  62028. #ifdef __LITTLE_ENDIAN__
  62029. #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
  62030. int32x4x4_t __s1 = __p1; \
  62031. __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
  62032. })
  62033. #else
  62034. #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
  62035. int32x4x4_t __s1 = __p1; \
  62036. int32x4x4_t __rev1; \
  62037. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  62038. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  62039. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  62040. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  62041. __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
  62042. })
  62043. #endif
  62044. #ifdef __LITTLE_ENDIAN__
  62045. #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
  62046. int64x2x4_t __s1 = __p1; \
  62047. __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
  62048. })
  62049. #else
  62050. #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
  62051. int64x2x4_t __s1 = __p1; \
  62052. int64x2x4_t __rev1; \
  62053. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62054. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62055. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62056. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62057. __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
  62058. })
  62059. #endif
  62060. #ifdef __LITTLE_ENDIAN__
  62061. #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
  62062. int16x8x4_t __s1 = __p1; \
  62063. __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
  62064. })
  62065. #else
  62066. #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
  62067. int16x8x4_t __s1 = __p1; \
  62068. int16x8x4_t __rev1; \
  62069. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  62070. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  62071. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  62072. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  62073. __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
  62074. })
  62075. #endif
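/*
 * Illustrative usage sketch (not part of the generated header): the q-form
 * vst1q_*_x4 macros above store four full q-register vectors contiguously;
 * vst1q_f32_x4 below writes sixteen consecutive floats.  Example-only
 * helper name.
 */
static inline void example_store_four_f32x4(float32_t *dst)
{
  float32x4x4_t v;
  v.val[0] = vdupq_n_f32(0.0f);
  v.val[1] = vdupq_n_f32(1.0f);
  v.val[2] = vdupq_n_f32(2.0f);
  v.val[3] = vdupq_n_f32(3.0f);
  vst1q_f32_x4(dst, v);       /* dst must hold 16 float32_t values */
}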
  62076. #ifdef __LITTLE_ENDIAN__
  62077. #define vst1_u8_x4(__p0, __p1) __extension__ ({ \
  62078. uint8x8x4_t __s1 = __p1; \
  62079. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
  62080. })
  62081. #else
  62082. #define vst1_u8_x4(__p0, __p1) __extension__ ({ \
  62083. uint8x8x4_t __s1 = __p1; \
  62084. uint8x8x4_t __rev1; \
  62085. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  62086. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  62087. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  62088. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  62089. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
  62090. })
  62091. #endif
  62092. #ifdef __LITTLE_ENDIAN__
  62093. #define vst1_u32_x4(__p0, __p1) __extension__ ({ \
  62094. uint32x2x4_t __s1 = __p1; \
  62095. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
  62096. })
  62097. #else
  62098. #define vst1_u32_x4(__p0, __p1) __extension__ ({ \
  62099. uint32x2x4_t __s1 = __p1; \
  62100. uint32x2x4_t __rev1; \
  62101. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62102. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62103. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62104. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62105. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
  62106. })
  62107. #endif
  62108. #ifdef __LITTLE_ENDIAN__
  62109. #define vst1_u64_x4(__p0, __p1) __extension__ ({ \
  62110. uint64x1x4_t __s1 = __p1; \
  62111. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
  62112. })
  62113. #else
  62114. #define vst1_u64_x4(__p0, __p1) __extension__ ({ \
  62115. uint64x1x4_t __s1 = __p1; \
  62116. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
  62117. })
  62118. #endif
  62119. #ifdef __LITTLE_ENDIAN__
  62120. #define vst1_u16_x4(__p0, __p1) __extension__ ({ \
  62121. uint16x4x4_t __s1 = __p1; \
  62122. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
  62123. })
  62124. #else
  62125. #define vst1_u16_x4(__p0, __p1) __extension__ ({ \
  62126. uint16x4x4_t __s1 = __p1; \
  62127. uint16x4x4_t __rev1; \
  62128. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  62129. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  62130. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  62131. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  62132. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
  62133. })
  62134. #endif
  62135. #ifdef __LITTLE_ENDIAN__
  62136. #define vst1_s8_x4(__p0, __p1) __extension__ ({ \
  62137. int8x8x4_t __s1 = __p1; \
  62138. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
  62139. })
  62140. #else
  62141. #define vst1_s8_x4(__p0, __p1) __extension__ ({ \
  62142. int8x8x4_t __s1 = __p1; \
  62143. int8x8x4_t __rev1; \
  62144. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
  62145. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
  62146. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
  62147. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
  62148. __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
  62149. })
  62150. #endif
  62151. #ifdef __LITTLE_ENDIAN__
  62152. #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
  62153. float64x1x4_t __s1 = __p1; \
  62154. __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
  62155. })
  62156. #else
  62157. #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
  62158. float64x1x4_t __s1 = __p1; \
  62159. __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
  62160. })
  62161. #endif
  62162. #ifdef __LITTLE_ENDIAN__
  62163. #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
  62164. float32x2x4_t __s1 = __p1; \
  62165. __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
  62166. })
  62167. #else
  62168. #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
  62169. float32x2x4_t __s1 = __p1; \
  62170. float32x2x4_t __rev1; \
  62171. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62172. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62173. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62174. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62175. __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
  62176. })
  62177. #endif
  62178. #ifdef __LITTLE_ENDIAN__
  62179. #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
  62180. float16x4x4_t __s1 = __p1; \
  62181. __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
  62182. })
  62183. #else
  62184. #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
  62185. float16x4x4_t __s1 = __p1; \
  62186. float16x4x4_t __rev1; \
  62187. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  62188. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  62189. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  62190. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  62191. __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
  62192. })
  62193. #endif
  62194. #ifdef __LITTLE_ENDIAN__
  62195. #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
  62196. int32x2x4_t __s1 = __p1; \
  62197. __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
  62198. })
  62199. #else
  62200. #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
  62201. int32x2x4_t __s1 = __p1; \
  62202. int32x2x4_t __rev1; \
  62203. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62204. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62205. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62206. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62207. __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
  62208. })
  62209. #endif
  62210. #ifdef __LITTLE_ENDIAN__
  62211. #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
  62212. int64x1x4_t __s1 = __p1; \
  62213. __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
  62214. })
  62215. #else
  62216. #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
  62217. int64x1x4_t __s1 = __p1; \
  62218. __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
  62219. })
  62220. #endif
  62221. #ifdef __LITTLE_ENDIAN__
  62222. #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
  62223. int16x4x4_t __s1 = __p1; \
  62224. __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
  62225. })
  62226. #else
  62227. #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
  62228. int16x4x4_t __s1 = __p1; \
  62229. int16x4x4_t __rev1; \
  62230. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
  62231. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
  62232. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
  62233. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
  62234. __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
  62235. })
  62236. #endif
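/*
 * Illustrative usage sketch (not part of the generated header): the
 * _x2/_x3/_x4 forms of vst1 store whole vectors one after another; they do
 * not interleave elements the way the vst2/vst3/vst4 macros that follow do.
 * Example-only helper name and values.
 */
static inline void example_store_four_u8x8(uint8_t *dst)
{
  uint8x8x4_t v;
  v.val[0] = vdup_n_u8(0x11);
  v.val[1] = vdup_n_u8(0x22);
  v.val[2] = vdup_n_u8(0x33);
  v.val[3] = vdup_n_u8(0x44);
  vst1_u8_x4(dst, v);         /* writes 32 bytes: 8 x 0x11, then 8 x 0x22, ... */
}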
  62237. #ifdef __LITTLE_ENDIAN__
  62238. #define vst2_p64(__p0, __p1) __extension__ ({ \
  62239. poly64x1x2_t __s1 = __p1; \
  62240. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
  62241. })
  62242. #else
  62243. #define vst2_p64(__p0, __p1) __extension__ ({ \
  62244. poly64x1x2_t __s1 = __p1; \
  62245. __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
  62246. })
  62247. #endif
  62248. #ifdef __LITTLE_ENDIAN__
  62249. #define vst2q_p64(__p0, __p1) __extension__ ({ \
  62250. poly64x2x2_t __s1 = __p1; \
  62251. __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
  62252. })
  62253. #else
  62254. #define vst2q_p64(__p0, __p1) __extension__ ({ \
  62255. poly64x2x2_t __s1 = __p1; \
  62256. poly64x2x2_t __rev1; \
  62257. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62258. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62259. __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
  62260. })
  62261. #endif
  62262. #ifdef __LITTLE_ENDIAN__
  62263. #define vst2q_u64(__p0, __p1) __extension__ ({ \
  62264. uint64x2x2_t __s1 = __p1; \
  62265. __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
  62266. })
  62267. #else
  62268. #define vst2q_u64(__p0, __p1) __extension__ ({ \
  62269. uint64x2x2_t __s1 = __p1; \
  62270. uint64x2x2_t __rev1; \
  62271. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62272. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62273. __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
  62274. })
  62275. #endif
  62276. #ifdef __LITTLE_ENDIAN__
  62277. #define vst2q_f64(__p0, __p1) __extension__ ({ \
  62278. float64x2x2_t __s1 = __p1; \
  62279. __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \
  62280. })
  62281. #else
  62282. #define vst2q_f64(__p0, __p1) __extension__ ({ \
  62283. float64x2x2_t __s1 = __p1; \
  62284. float64x2x2_t __rev1; \
  62285. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62286. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62287. __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
  62288. })
  62289. #endif
  62290. #ifdef __LITTLE_ENDIAN__
  62291. #define vst2q_s64(__p0, __p1) __extension__ ({ \
  62292. int64x2x2_t __s1 = __p1; \
  62293. __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \
  62294. })
  62295. #else
  62296. #define vst2q_s64(__p0, __p1) __extension__ ({ \
  62297. int64x2x2_t __s1 = __p1; \
  62298. int64x2x2_t __rev1; \
  62299. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62300. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62301. __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
  62302. })
  62303. #endif
  62304. #ifdef __LITTLE_ENDIAN__
  62305. #define vst2_f64(__p0, __p1) __extension__ ({ \
  62306. float64x1x2_t __s1 = __p1; \
  62307. __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
  62308. })
  62309. #else
  62310. #define vst2_f64(__p0, __p1) __extension__ ({ \
  62311. float64x1x2_t __s1 = __p1; \
  62312. __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
  62313. })
  62314. #endif
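/*
 * Illustrative usage sketch (not part of the generated header, assumes an
 * AArch64 target where float64x2_t is available): vst2q_f64 interleaves the
 * two source vectors, so memory receives re0, im0, re1, im1 -- handy for
 * writing out separately held real/imaginary planes.  Example-only helper.
 */
static inline void example_store_interleaved_f64(float64_t *dst,
                                                 float64x2_t re, float64x2_t im)
{
  float64x2x2_t v;
  v.val[0] = re;              /* goes to dst[0] and dst[2] */
  v.val[1] = im;              /* goes to dst[1] and dst[3] */
  vst2q_f64(dst, v);          /* dst must hold 4 float64_t values */
}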
  62315. #ifdef __LITTLE_ENDIAN__
  62316. #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62317. poly64x1x2_t __s1 = __p1; \
  62318. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
  62319. })
  62320. #else
  62321. #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62322. poly64x1x2_t __s1 = __p1; \
  62323. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
  62324. })
  62325. #endif
  62326. #ifdef __LITTLE_ENDIAN__
  62327. #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  62328. poly8x16x2_t __s1 = __p1; \
  62329. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
  62330. })
  62331. #else
  62332. #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  62333. poly8x16x2_t __s1 = __p1; \
  62334. poly8x16x2_t __rev1; \
  62335. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62336. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62337. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
  62338. })
  62339. #endif
  62340. #ifdef __LITTLE_ENDIAN__
  62341. #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62342. poly64x2x2_t __s1 = __p1; \
  62343. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
  62344. })
  62345. #else
  62346. #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62347. poly64x2x2_t __s1 = __p1; \
  62348. poly64x2x2_t __rev1; \
  62349. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62350. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62351. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
  62352. })
  62353. #endif
  62354. #ifdef __LITTLE_ENDIAN__
  62355. #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  62356. uint8x16x2_t __s1 = __p1; \
  62357. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
  62358. })
  62359. #else
  62360. #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  62361. uint8x16x2_t __s1 = __p1; \
  62362. uint8x16x2_t __rev1; \
  62363. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62364. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62365. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
  62366. })
  62367. #endif
  62368. #ifdef __LITTLE_ENDIAN__
  62369. #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62370. uint64x2x2_t __s1 = __p1; \
  62371. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
  62372. })
  62373. #else
  62374. #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62375. uint64x2x2_t __s1 = __p1; \
  62376. uint64x2x2_t __rev1; \
  62377. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62378. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62379. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
  62380. })
  62381. #endif
  62382. #ifdef __LITTLE_ENDIAN__
  62383. #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  62384. int8x16x2_t __s1 = __p1; \
  62385. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
  62386. })
  62387. #else
  62388. #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  62389. int8x16x2_t __s1 = __p1; \
  62390. int8x16x2_t __rev1; \
  62391. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62392. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62393. __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
  62394. })
  62395. #endif
  62396. #ifdef __LITTLE_ENDIAN__
  62397. #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62398. float64x2x2_t __s1 = __p1; \
  62399. __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \
  62400. })
  62401. #else
  62402. #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62403. float64x2x2_t __s1 = __p1; \
  62404. float64x2x2_t __rev1; \
  62405. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62406. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62407. __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
  62408. })
  62409. #endif
  62410. #ifdef __LITTLE_ENDIAN__
  62411. #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62412. int64x2x2_t __s1 = __p1; \
  62413. __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \
  62414. })
  62415. #else
  62416. #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62417. int64x2x2_t __s1 = __p1; \
  62418. int64x2x2_t __rev1; \
  62419. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62420. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62421. __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
  62422. })
  62423. #endif
  62424. #ifdef __LITTLE_ENDIAN__
  62425. #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62426. uint64x1x2_t __s1 = __p1; \
  62427. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
  62428. })
  62429. #else
  62430. #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62431. uint64x1x2_t __s1 = __p1; \
  62432. __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
  62433. })
  62434. #endif
  62435. #ifdef __LITTLE_ENDIAN__
  62436. #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62437. float64x1x2_t __s1 = __p1; \
  62438. __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
  62439. })
  62440. #else
  62441. #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62442. float64x1x2_t __s1 = __p1; \
  62443. __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
  62444. })
  62445. #endif
  62446. #ifdef __LITTLE_ENDIAN__
  62447. #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62448. int64x1x2_t __s1 = __p1; \
  62449. __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
  62450. })
  62451. #else
  62452. #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62453. int64x1x2_t __s1 = __p1; \
  62454. __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
  62455. })
  62456. #endif
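/*
 * Illustrative usage sketch (not part of the generated header): the _lane
 * store variants write only the selected lane from each vector of the
 * tuple, so vst2q_lane_u8 below emits exactly two bytes.  The lane index
 * must be a compile-time constant (0-15 for a 16-lane vector).
 * Example-only helper name.
 */
static inline void example_store_lane2_u8(uint8_t *dst, uint8x16x2_t v)
{
  vst2q_lane_u8(dst, v, 3);   /* dst[0] = v.val[0][3], dst[1] = v.val[1][3] */
}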
  62457. #ifdef __LITTLE_ENDIAN__
  62458. #define vst3_p64(__p0, __p1) __extension__ ({ \
  62459. poly64x1x3_t __s1 = __p1; \
  62460. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
  62461. })
  62462. #else
  62463. #define vst3_p64(__p0, __p1) __extension__ ({ \
  62464. poly64x1x3_t __s1 = __p1; \
  62465. __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
  62466. })
  62467. #endif
  62468. #ifdef __LITTLE_ENDIAN__
  62469. #define vst3q_p64(__p0, __p1) __extension__ ({ \
  62470. poly64x2x3_t __s1 = __p1; \
  62471. __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
  62472. })
  62473. #else
  62474. #define vst3q_p64(__p0, __p1) __extension__ ({ \
  62475. poly64x2x3_t __s1 = __p1; \
  62476. poly64x2x3_t __rev1; \
  62477. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62478. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62479. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62480. __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
  62481. })
  62482. #endif
  62483. #ifdef __LITTLE_ENDIAN__
  62484. #define vst3q_u64(__p0, __p1) __extension__ ({ \
  62485. uint64x2x3_t __s1 = __p1; \
  62486. __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
  62487. })
  62488. #else
  62489. #define vst3q_u64(__p0, __p1) __extension__ ({ \
  62490. uint64x2x3_t __s1 = __p1; \
  62491. uint64x2x3_t __rev1; \
  62492. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62493. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62494. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62495. __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
  62496. })
  62497. #endif
  62498. #ifdef __LITTLE_ENDIAN__
  62499. #define vst3q_f64(__p0, __p1) __extension__ ({ \
  62500. float64x2x3_t __s1 = __p1; \
  62501. __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
  62502. })
  62503. #else
  62504. #define vst3q_f64(__p0, __p1) __extension__ ({ \
  62505. float64x2x3_t __s1 = __p1; \
  62506. float64x2x3_t __rev1; \
  62507. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62508. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62509. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62510. __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
  62511. })
  62512. #endif
  62513. #ifdef __LITTLE_ENDIAN__
  62514. #define vst3q_s64(__p0, __p1) __extension__ ({ \
  62515. int64x2x3_t __s1 = __p1; \
  62516. __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
  62517. })
  62518. #else
  62519. #define vst3q_s64(__p0, __p1) __extension__ ({ \
  62520. int64x2x3_t __s1 = __p1; \
  62521. int64x2x3_t __rev1; \
  62522. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62523. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62524. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62525. __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
  62526. })
  62527. #endif
  62528. #ifdef __LITTLE_ENDIAN__
  62529. #define vst3_f64(__p0, __p1) __extension__ ({ \
  62530. float64x1x3_t __s1 = __p1; \
  62531. __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
  62532. })
  62533. #else
  62534. #define vst3_f64(__p0, __p1) __extension__ ({ \
  62535. float64x1x3_t __s1 = __p1; \
  62536. __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
  62537. })
  62538. #endif
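/*
 * Illustrative usage sketch (not part of the generated header, AArch64
 * only): vst3q_f64 interleaves three planes, e.g. x/y/z coordinates, giving
 * x0, y0, z0, x1, y1, z1 in memory.  Example-only helper name.
 */
static inline void example_store_xyz_f64(float64_t *dst,
                                         float64x2_t x, float64x2_t y, float64x2_t z)
{
  float64x2x3_t v = { { x, y, z } };
  vst3q_f64(dst, v);          /* dst must hold 6 float64_t values */
}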
  62539. #ifdef __LITTLE_ENDIAN__
  62540. #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62541. poly64x1x3_t __s1 = __p1; \
  62542. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
  62543. })
  62544. #else
  62545. #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62546. poly64x1x3_t __s1 = __p1; \
  62547. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
  62548. })
  62549. #endif
  62550. #ifdef __LITTLE_ENDIAN__
  62551. #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  62552. poly8x16x3_t __s1 = __p1; \
  62553. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
  62554. })
  62555. #else
  62556. #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  62557. poly8x16x3_t __s1 = __p1; \
  62558. poly8x16x3_t __rev1; \
  62559. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62560. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62561. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62562. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
  62563. })
  62564. #endif
  62565. #ifdef __LITTLE_ENDIAN__
  62566. #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62567. poly64x2x3_t __s1 = __p1; \
  62568. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
  62569. })
  62570. #else
  62571. #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62572. poly64x2x3_t __s1 = __p1; \
  62573. poly64x2x3_t __rev1; \
  62574. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62575. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62576. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62577. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
  62578. })
  62579. #endif
  62580. #ifdef __LITTLE_ENDIAN__
  62581. #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  62582. uint8x16x3_t __s1 = __p1; \
  62583. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
  62584. })
  62585. #else
  62586. #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  62587. uint8x16x3_t __s1 = __p1; \
  62588. uint8x16x3_t __rev1; \
  62589. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62590. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62591. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62592. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
  62593. })
  62594. #endif
  62595. #ifdef __LITTLE_ENDIAN__
  62596. #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62597. uint64x2x3_t __s1 = __p1; \
  62598. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
  62599. })
  62600. #else
  62601. #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62602. uint64x2x3_t __s1 = __p1; \
  62603. uint64x2x3_t __rev1; \
  62604. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62605. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62606. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62607. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
  62608. })
  62609. #endif
  62610. #ifdef __LITTLE_ENDIAN__
  62611. #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  62612. int8x16x3_t __s1 = __p1; \
  62613. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
  62614. })
  62615. #else
  62616. #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  62617. int8x16x3_t __s1 = __p1; \
  62618. int8x16x3_t __rev1; \
  62619. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62620. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62621. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62622. __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
  62623. })
  62624. #endif
  62625. #ifdef __LITTLE_ENDIAN__
  62626. #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62627. float64x2x3_t __s1 = __p1; \
  62628. __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
  62629. })
  62630. #else
  62631. #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62632. float64x2x3_t __s1 = __p1; \
  62633. float64x2x3_t __rev1; \
  62634. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62635. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62636. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62637. __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
  62638. })
  62639. #endif
  62640. #ifdef __LITTLE_ENDIAN__
  62641. #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62642. int64x2x3_t __s1 = __p1; \
  62643. __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
  62644. })
  62645. #else
  62646. #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62647. int64x2x3_t __s1 = __p1; \
  62648. int64x2x3_t __rev1; \
  62649. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62650. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62651. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62652. __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
  62653. })
  62654. #endif
  62655. #ifdef __LITTLE_ENDIAN__
  62656. #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62657. uint64x1x3_t __s1 = __p1; \
  62658. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
  62659. })
  62660. #else
  62661. #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62662. uint64x1x3_t __s1 = __p1; \
  62663. __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
  62664. })
  62665. #endif
  62666. #ifdef __LITTLE_ENDIAN__
  62667. #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62668. float64x1x3_t __s1 = __p1; \
  62669. __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
  62670. })
  62671. #else
  62672. #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62673. float64x1x3_t __s1 = __p1; \
  62674. __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
  62675. })
  62676. #endif
  62677. #ifdef __LITTLE_ENDIAN__
  62678. #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62679. int64x1x3_t __s1 = __p1; \
  62680. __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
  62681. })
  62682. #else
  62683. #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62684. int64x1x3_t __s1 = __p1; \
  62685. __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
  62686. })
  62687. #endif
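/*
 * Illustrative usage sketch (not part of the generated header): a _lane
 * form of the 3-element store; only the chosen lane of each of the three
 * vectors is written, i.e. three bytes in total here.  Example-only helper.
 */
static inline void example_store_lane3_u8(uint8_t *dst, uint8x16x3_t v)
{
  vst3q_lane_u8(dst, v, 0);   /* dst[0..2] = lane 0 of v.val[0..2] */
}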
  62688. #ifdef __LITTLE_ENDIAN__
  62689. #define vst4_p64(__p0, __p1) __extension__ ({ \
  62690. poly64x1x4_t __s1 = __p1; \
  62691. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
  62692. })
  62693. #else
  62694. #define vst4_p64(__p0, __p1) __extension__ ({ \
  62695. poly64x1x4_t __s1 = __p1; \
  62696. __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
  62697. })
  62698. #endif
  62699. #ifdef __LITTLE_ENDIAN__
  62700. #define vst4q_p64(__p0, __p1) __extension__ ({ \
  62701. poly64x2x4_t __s1 = __p1; \
  62702. __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
  62703. })
  62704. #else
  62705. #define vst4q_p64(__p0, __p1) __extension__ ({ \
  62706. poly64x2x4_t __s1 = __p1; \
  62707. poly64x2x4_t __rev1; \
  62708. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62709. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62710. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62711. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62712. __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
  62713. })
  62714. #endif
  62715. #ifdef __LITTLE_ENDIAN__
  62716. #define vst4q_u64(__p0, __p1) __extension__ ({ \
  62717. uint64x2x4_t __s1 = __p1; \
  62718. __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
  62719. })
  62720. #else
  62721. #define vst4q_u64(__p0, __p1) __extension__ ({ \
  62722. uint64x2x4_t __s1 = __p1; \
  62723. uint64x2x4_t __rev1; \
  62724. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62725. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62726. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62727. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62728. __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
  62729. })
  62730. #endif
  62731. #ifdef __LITTLE_ENDIAN__
  62732. #define vst4q_f64(__p0, __p1) __extension__ ({ \
  62733. float64x2x4_t __s1 = __p1; \
  62734. __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
  62735. })
  62736. #else
  62737. #define vst4q_f64(__p0, __p1) __extension__ ({ \
  62738. float64x2x4_t __s1 = __p1; \
  62739. float64x2x4_t __rev1; \
  62740. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62741. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62742. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62743. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62744. __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
  62745. })
  62746. #endif
  62747. #ifdef __LITTLE_ENDIAN__
  62748. #define vst4q_s64(__p0, __p1) __extension__ ({ \
  62749. int64x2x4_t __s1 = __p1; \
  62750. __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
  62751. })
  62752. #else
  62753. #define vst4q_s64(__p0, __p1) __extension__ ({ \
  62754. int64x2x4_t __s1 = __p1; \
  62755. int64x2x4_t __rev1; \
  62756. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62757. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62758. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62759. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62760. __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
  62761. })
  62762. #endif
  62763. #ifdef __LITTLE_ENDIAN__
  62764. #define vst4_f64(__p0, __p1) __extension__ ({ \
  62765. float64x1x4_t __s1 = __p1; \
  62766. __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
  62767. })
  62768. #else
  62769. #define vst4_f64(__p0, __p1) __extension__ ({ \
  62770. float64x1x4_t __s1 = __p1; \
  62771. __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
  62772. })
  62773. #endif
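/*
 * Illustrative usage sketch (not part of the generated header, AArch64
 * only): vst4q_u64 interleaves four 2-lane vectors, so memory receives
 * a0, b0, c0, d0, a1, b1, c1, d1.  Example-only helper name.
 */
static inline void example_store_four_way_u64(uint64_t *dst,
                                              uint64x2_t a, uint64x2_t b,
                                              uint64x2_t c, uint64x2_t d)
{
  uint64x2x4_t v = { { a, b, c, d } };
  vst4q_u64(dst, v);          /* dst must hold 8 uint64_t values */
}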
  62774. #ifdef __LITTLE_ENDIAN__
  62775. #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62776. poly64x1x4_t __s1 = __p1; \
  62777. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
  62778. })
  62779. #else
  62780. #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62781. poly64x1x4_t __s1 = __p1; \
  62782. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
  62783. })
  62784. #endif
  62785. #ifdef __LITTLE_ENDIAN__
  62786. #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  62787. poly8x16x4_t __s1 = __p1; \
  62788. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
  62789. })
  62790. #else
  62791. #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
  62792. poly8x16x4_t __s1 = __p1; \
  62793. poly8x16x4_t __rev1; \
  62794. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62795. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62796. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62797. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62798. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
  62799. })
  62800. #endif
  62801. #ifdef __LITTLE_ENDIAN__
  62802. #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62803. poly64x2x4_t __s1 = __p1; \
  62804. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
  62805. })
  62806. #else
  62807. #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
  62808. poly64x2x4_t __s1 = __p1; \
  62809. poly64x2x4_t __rev1; \
  62810. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62811. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62812. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62813. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62814. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
  62815. })
  62816. #endif
  62817. #ifdef __LITTLE_ENDIAN__
  62818. #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  62819. uint8x16x4_t __s1 = __p1; \
  62820. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
  62821. })
  62822. #else
  62823. #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
  62824. uint8x16x4_t __s1 = __p1; \
  62825. uint8x16x4_t __rev1; \
  62826. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62827. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62828. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62829. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62830. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
  62831. })
  62832. #endif
  62833. #ifdef __LITTLE_ENDIAN__
  62834. #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62835. uint64x2x4_t __s1 = __p1; \
  62836. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
  62837. })
  62838. #else
  62839. #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62840. uint64x2x4_t __s1 = __p1; \
  62841. uint64x2x4_t __rev1; \
  62842. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62843. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62844. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62845. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62846. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
  62847. })
  62848. #endif
  62849. #ifdef __LITTLE_ENDIAN__
  62850. #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  62851. int8x16x4_t __s1 = __p1; \
  62852. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
  62853. })
  62854. #else
  62855. #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
  62856. int8x16x4_t __s1 = __p1; \
  62857. int8x16x4_t __rev1; \
  62858. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62859. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62860. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62861. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
  62862. __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
  62863. })
  62864. #endif
  62865. #ifdef __LITTLE_ENDIAN__
  62866. #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62867. float64x2x4_t __s1 = __p1; \
  62868. __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
  62869. })
  62870. #else
  62871. #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62872. float64x2x4_t __s1 = __p1; \
  62873. float64x2x4_t __rev1; \
  62874. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62875. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62876. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62877. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62878. __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
  62879. })
  62880. #endif
  62881. #ifdef __LITTLE_ENDIAN__
  62882. #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62883. int64x2x4_t __s1 = __p1; \
  62884. __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
  62885. })
  62886. #else
  62887. #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62888. int64x2x4_t __s1 = __p1; \
  62889. int64x2x4_t __rev1; \
  62890. __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
  62891. __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
  62892. __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
  62893. __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
  62894. __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
  62895. })
  62896. #endif
  62897. #ifdef __LITTLE_ENDIAN__
  62898. #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62899. uint64x1x4_t __s1 = __p1; \
  62900. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
  62901. })
  62902. #else
  62903. #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
  62904. uint64x1x4_t __s1 = __p1; \
  62905. __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
  62906. })
  62907. #endif
  62908. #ifdef __LITTLE_ENDIAN__
  62909. #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62910. float64x1x4_t __s1 = __p1; \
  62911. __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
  62912. })
  62913. #else
  62914. #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
  62915. float64x1x4_t __s1 = __p1; \
  62916. __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
  62917. })
  62918. #endif
  62919. #ifdef __LITTLE_ENDIAN__
  62920. #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62921. int64x1x4_t __s1 = __p1; \
  62922. __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
  62923. })
  62924. #else
  62925. #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
  62926. int64x1x4_t __s1 = __p1; \
  62927. __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
  62928. })
  62929. #endif
  62930. #ifdef __LITTLE_ENDIAN__
  62931. #define vstrq_p128(__p0, __p1) __extension__ ({ \
  62932. poly128_t __s1 = __p1; \
  62933. __builtin_neon_vstrq_p128(__p0, __s1); \
  62934. })
  62935. #else
  62936. #define vstrq_p128(__p0, __p1) __extension__ ({ \
  62937. poly128_t __s1 = __p1; \
  62938. __builtin_neon_vstrq_p128(__p0, __s1); \
  62939. })
  62940. #endif
  62941. #ifdef __LITTLE_ENDIAN__
  62942. __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
  62943. uint64_t __ret;
  62944. __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
  62945. return __ret;
  62946. }
  62947. #else
  62948. __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
  62949. uint64_t __ret;
  62950. __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
  62951. return __ret;
  62952. }
  62953. #endif
  62954. #ifdef __LITTLE_ENDIAN__
  62955. __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
  62956. int64_t __ret;
  62957. __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
  62958. return __ret;
  62959. }
  62960. #else
  62961. __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
  62962. int64_t __ret;
  62963. __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
  62964. return __ret;
  62965. }
  62966. #endif
  62967. #ifdef __LITTLE_ENDIAN__
  62968. __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
  62969. float64x2_t __ret;
  62970. __ret = __p0 - __p1;
  62971. return __ret;
  62972. }
  62973. #else
  62974. __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
  62975. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  62976. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  62977. float64x2_t __ret;
  62978. __ret = __rev0 - __rev1;
  62979. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  62980. return __ret;
  62981. }
  62982. #endif
  62983. #ifdef __LITTLE_ENDIAN__
  62984. __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
  62985. float64x1_t __ret;
  62986. __ret = __p0 - __p1;
  62987. return __ret;
  62988. }
  62989. #else
  62990. __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
  62991. float64x1_t __ret;
  62992. __ret = __p0 - __p1;
  62993. return __ret;
  62994. }
  62995. #endif
  62996. #ifdef __LITTLE_ENDIAN__
  62997. __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  62998. uint16x8_t __ret;
  62999. __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
  63000. return __ret;
  63001. }
  63002. #else
  63003. __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  63004. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63005. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63006. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  63007. uint16x8_t __ret;
  63008. __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
  63009. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63010. return __ret;
  63011. }
  63012. #endif
  63013. #ifdef __LITTLE_ENDIAN__
  63014. __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  63015. uint32x4_t __ret;
  63016. __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
  63017. return __ret;
  63018. }
  63019. #else
  63020. __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
  63021. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63022. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63023. uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  63024. uint32x4_t __ret;
  63025. __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
  63026. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63027. return __ret;
  63028. }
  63029. #endif
  63030. #ifdef __LITTLE_ENDIAN__
  63031. __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  63032. uint8x16_t __ret;
  63033. __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
  63034. return __ret;
  63035. }
  63036. #else
  63037. __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  63038. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63039. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63040. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  63041. uint8x16_t __ret;
  63042. __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
  63043. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63044. return __ret;
  63045. }
  63046. #endif
  63047. #ifdef __LITTLE_ENDIAN__
  63048. __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  63049. int16x8_t __ret;
  63050. __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
  63051. return __ret;
  63052. }
  63053. #else
  63054. __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  63055. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63056. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63057. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  63058. int16x8_t __ret;
  63059. __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
  63060. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63061. return __ret;
  63062. }
  63063. #endif
  63064. #ifdef __LITTLE_ENDIAN__
  63065. __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  63066. int32x4_t __ret;
  63067. __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
  63068. return __ret;
  63069. }
  63070. #else
  63071. __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
  63072. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63073. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63074. int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  63075. int32x4_t __ret;
  63076. __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
  63077. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63078. return __ret;
  63079. }
  63080. #endif
  63081. #ifdef __LITTLE_ENDIAN__
  63082. __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  63083. int8x16_t __ret;
  63084. __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
  63085. return __ret;
  63086. }
  63087. #else
  63088. __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  63089. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63090. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63091. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  63092. int8x16_t __ret;
  63093. __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
  63094. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63095. return __ret;
  63096. }
  63097. #endif
  63098. #ifdef __LITTLE_ENDIAN__
  63099. __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  63100. uint16x8_t __ret;
  63101. __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
  63102. return __ret;
  63103. }
  63104. #else
  63105. __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  63106. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63107. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63108. uint16x8_t __ret;
  63109. __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
  63110. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63111. return __ret;
  63112. }
  63113. #endif
  63114. #ifdef __LITTLE_ENDIAN__
  63115. __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  63116. uint64x2_t __ret;
  63117. __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
  63118. return __ret;
  63119. }
  63120. #else
  63121. __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  63122. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63123. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63124. uint64x2_t __ret;
  63125. __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
  63126. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63127. return __ret;
  63128. }
  63129. #endif
  63130. #ifdef __LITTLE_ENDIAN__
  63131. __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  63132. uint32x4_t __ret;
  63133. __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
  63134. return __ret;
  63135. }
  63136. #else
  63137. __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  63138. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63139. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63140. uint32x4_t __ret;
  63141. __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
  63142. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63143. return __ret;
  63144. }
  63145. #endif
  63146. #ifdef __LITTLE_ENDIAN__
  63147. __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
  63148. int16x8_t __ret;
  63149. __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
  63150. return __ret;
  63151. }
  63152. #else
  63153. __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
  63154. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63155. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63156. int16x8_t __ret;
  63157. __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
  63158. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63159. return __ret;
  63160. }
  63161. #endif
  63162. #ifdef __LITTLE_ENDIAN__
  63163. __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
  63164. int64x2_t __ret;
  63165. __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
  63166. return __ret;
  63167. }
  63168. #else
  63169. __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
  63170. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63171. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63172. int64x2_t __ret;
  63173. __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
  63174. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63175. return __ret;
  63176. }
  63177. #endif
  63178. #ifdef __LITTLE_ENDIAN__
  63179. __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
  63180. int32x4_t __ret;
  63181. __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
  63182. return __ret;
  63183. }
  63184. #else
  63185. __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
  63186. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63187. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63188. int32x4_t __ret;
  63189. __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
  63190. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63191. return __ret;
  63192. }
  63193. #endif
  63194. #ifdef __LITTLE_ENDIAN__
  63195. __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
  63196. uint16x8_t __ret;
  63197. __ret = __p0 - vmovl_high_u8(__p1);
  63198. return __ret;
  63199. }
  63200. #else
  63201. __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
  63202. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63203. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63204. uint16x8_t __ret;
  63205. __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
  63206. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63207. return __ret;
  63208. }
  63209. #endif
  63210. #ifdef __LITTLE_ENDIAN__
  63211. __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
  63212. uint64x2_t __ret;
  63213. __ret = __p0 - vmovl_high_u32(__p1);
  63214. return __ret;
  63215. }
  63216. #else
  63217. __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
  63218. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63219. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63220. uint64x2_t __ret;
  63221. __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
  63222. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63223. return __ret;
  63224. }
  63225. #endif
  63226. #ifdef __LITTLE_ENDIAN__
  63227. __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
  63228. uint32x4_t __ret;
  63229. __ret = __p0 - vmovl_high_u16(__p1);
  63230. return __ret;
  63231. }
  63232. #else
  63233. __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
  63234. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63235. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63236. uint32x4_t __ret;
  63237. __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
  63238. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63239. return __ret;
  63240. }
  63241. #endif
  63242. #ifdef __LITTLE_ENDIAN__
  63243. __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
  63244. int16x8_t __ret;
  63245. __ret = __p0 - vmovl_high_s8(__p1);
  63246. return __ret;
  63247. }
  63248. #else
  63249. __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
  63250. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63251. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63252. int16x8_t __ret;
  63253. __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
  63254. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63255. return __ret;
  63256. }
  63257. #endif
  63258. #ifdef __LITTLE_ENDIAN__
  63259. __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
  63260. int64x2_t __ret;
  63261. __ret = __p0 - vmovl_high_s32(__p1);
  63262. return __ret;
  63263. }
  63264. #else
  63265. __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
  63266. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63267. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63268. int64x2_t __ret;
  63269. __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
  63270. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63271. return __ret;
  63272. }
  63273. #endif
  63274. #ifdef __LITTLE_ENDIAN__
  63275. __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
  63276. int32x4_t __ret;
  63277. __ret = __p0 - vmovl_high_s16(__p1);
  63278. return __ret;
  63279. }
  63280. #else
  63281. __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
  63282. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63283. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63284. int32x4_t __ret;
  63285. __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
  63286. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63287. return __ret;
  63288. }
  63289. #endif
  63290. #ifdef __LITTLE_ENDIAN__
  63291. __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
  63292. poly8x8_t __ret;
  63293. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
  63294. return __ret;
  63295. }
  63296. #else
  63297. __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
  63298. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63299. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63300. poly8x8_t __ret;
  63301. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
  63302. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63303. return __ret;
  63304. }
  63305. #endif
  63306. #ifdef __LITTLE_ENDIAN__
  63307. __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
  63308. poly16x4_t __ret;
  63309. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
  63310. return __ret;
  63311. }
  63312. #else
  63313. __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
  63314. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63315. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63316. poly16x4_t __ret;
  63317. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
  63318. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63319. return __ret;
  63320. }
  63321. #endif
  63322. #ifdef __LITTLE_ENDIAN__
  63323. __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  63324. poly8x16_t __ret;
  63325. __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
  63326. return __ret;
  63327. }
  63328. #else
  63329. __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  63330. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63331. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63332. poly8x16_t __ret;
  63333. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
  63334. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63335. return __ret;
  63336. }
  63337. #endif
  63338. #ifdef __LITTLE_ENDIAN__
  63339. __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  63340. poly64x2_t __ret;
  63341. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  63342. return __ret;
  63343. }
  63344. #else
  63345. __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  63346. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63347. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63348. poly64x2_t __ret;
  63349. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  63350. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63351. return __ret;
  63352. }
  63353. #endif
  63354. #ifdef __LITTLE_ENDIAN__
  63355. __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  63356. poly16x8_t __ret;
  63357. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
  63358. return __ret;
  63359. }
  63360. #else
  63361. __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  63362. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63363. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63364. poly16x8_t __ret;
  63365. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
  63366. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63367. return __ret;
  63368. }
  63369. #endif
  63370. #ifdef __LITTLE_ENDIAN__
  63371. __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  63372. uint8x16_t __ret;
  63373. __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
  63374. return __ret;
  63375. }
  63376. #else
  63377. __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  63378. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63379. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63380. uint8x16_t __ret;
  63381. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
  63382. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63383. return __ret;
  63384. }
  63385. #endif
  63386. #ifdef __LITTLE_ENDIAN__
  63387. __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  63388. uint32x4_t __ret;
  63389. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
  63390. return __ret;
  63391. }
  63392. #else
  63393. __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  63394. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63395. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63396. uint32x4_t __ret;
  63397. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
  63398. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63399. return __ret;
  63400. }
  63401. #endif
  63402. #ifdef __LITTLE_ENDIAN__
  63403. __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  63404. uint64x2_t __ret;
  63405. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  63406. return __ret;
  63407. }
  63408. #else
  63409. __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  63410. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63411. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63412. uint64x2_t __ret;
  63413. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  63414. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63415. return __ret;
  63416. }
  63417. #endif
  63418. #ifdef __LITTLE_ENDIAN__
  63419. __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  63420. uint16x8_t __ret;
  63421. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
  63422. return __ret;
  63423. }
  63424. #else
  63425. __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  63426. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63427. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63428. uint16x8_t __ret;
  63429. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
  63430. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63431. return __ret;
  63432. }
  63433. #endif
  63434. #ifdef __LITTLE_ENDIAN__
  63435. __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
  63436. int8x16_t __ret;
  63437. __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
  63438. return __ret;
  63439. }
  63440. #else
  63441. __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
  63442. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63443. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63444. int8x16_t __ret;
  63445. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
  63446. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63447. return __ret;
  63448. }
  63449. #endif
  63450. #ifdef __LITTLE_ENDIAN__
  63451. __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
  63452. float64x2_t __ret;
  63453. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  63454. return __ret;
  63455. }
  63456. #else
  63457. __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
  63458. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63459. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63460. float64x2_t __ret;
  63461. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  63462. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63463. return __ret;
  63464. }
  63465. #endif
  63466. #ifdef __LITTLE_ENDIAN__
  63467. __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
  63468. float32x4_t __ret;
  63469. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
  63470. return __ret;
  63471. }
  63472. #else
  63473. __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
  63474. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63475. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63476. float32x4_t __ret;
  63477. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
  63478. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63479. return __ret;
  63480. }
  63481. #endif
  63482. #ifdef __LITTLE_ENDIAN__
  63483. __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
  63484. int32x4_t __ret;
  63485. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
  63486. return __ret;
  63487. }
  63488. #else
  63489. __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
  63490. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63491. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63492. int32x4_t __ret;
  63493. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
  63494. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63495. return __ret;
  63496. }
  63497. #endif
  63498. #ifdef __LITTLE_ENDIAN__
  63499. __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
  63500. int64x2_t __ret;
  63501. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  63502. return __ret;
  63503. }
  63504. #else
  63505. __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
  63506. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63507. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63508. int64x2_t __ret;
  63509. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  63510. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63511. return __ret;
  63512. }
  63513. #endif
  63514. #ifdef __LITTLE_ENDIAN__
  63515. __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
  63516. int16x8_t __ret;
  63517. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
  63518. return __ret;
  63519. }
  63520. #else
  63521. __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
  63522. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63523. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63524. int16x8_t __ret;
  63525. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
  63526. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63527. return __ret;
  63528. }
  63529. #endif
  63530. #ifdef __LITTLE_ENDIAN__
  63531. __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
  63532. uint8x8_t __ret;
  63533. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
  63534. return __ret;
  63535. }
  63536. #else
  63537. __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
  63538. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63539. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63540. uint8x8_t __ret;
  63541. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
  63542. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63543. return __ret;
  63544. }
  63545. #endif
  63546. #ifdef __LITTLE_ENDIAN__
  63547. __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
  63548. uint32x2_t __ret;
  63549. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  63550. return __ret;
  63551. }
  63552. #else
  63553. __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
  63554. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63555. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63556. uint32x2_t __ret;
  63557. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  63558. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63559. return __ret;
  63560. }
  63561. #endif
  63562. #ifdef __LITTLE_ENDIAN__
  63563. __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
  63564. uint16x4_t __ret;
  63565. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
  63566. return __ret;
  63567. }
  63568. #else
  63569. __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
  63570. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63571. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63572. uint16x4_t __ret;
  63573. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
  63574. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63575. return __ret;
  63576. }
  63577. #endif
  63578. #ifdef __LITTLE_ENDIAN__
  63579. __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
  63580. int8x8_t __ret;
  63581. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
  63582. return __ret;
  63583. }
  63584. #else
  63585. __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
  63586. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63587. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63588. int8x8_t __ret;
  63589. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
  63590. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63591. return __ret;
  63592. }
  63593. #endif
  63594. #ifdef __LITTLE_ENDIAN__
  63595. __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
  63596. float32x2_t __ret;
  63597. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  63598. return __ret;
  63599. }
  63600. #else
  63601. __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
  63602. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63603. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63604. float32x2_t __ret;
  63605. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  63606. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63607. return __ret;
  63608. }
  63609. #endif
  63610. #ifdef __LITTLE_ENDIAN__
  63611. __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
  63612. int32x2_t __ret;
  63613. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  63614. return __ret;
  63615. }
  63616. #else
  63617. __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
  63618. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63619. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63620. int32x2_t __ret;
  63621. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  63622. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63623. return __ret;
  63624. }
  63625. #endif
  63626. #ifdef __LITTLE_ENDIAN__
  63627. __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
  63628. int16x4_t __ret;
  63629. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
  63630. return __ret;
  63631. }
  63632. #else
  63633. __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
  63634. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63635. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63636. int16x4_t __ret;
  63637. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
  63638. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63639. return __ret;
  63640. }
  63641. #endif
  63642. #ifdef __LITTLE_ENDIAN__
  63643. __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
  63644. poly8x8_t __ret;
  63645. __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
  63646. return __ret;
  63647. }
  63648. #else
  63649. __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
  63650. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63651. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63652. poly8x8_t __ret;
  63653. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
  63654. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63655. return __ret;
  63656. }
  63657. #endif
  63658. #ifdef __LITTLE_ENDIAN__
  63659. __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
  63660. poly16x4_t __ret;
  63661. __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
  63662. return __ret;
  63663. }
  63664. #else
  63665. __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
  63666. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63667. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63668. poly16x4_t __ret;
  63669. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
  63670. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63671. return __ret;
  63672. }
  63673. #endif
  63674. #ifdef __LITTLE_ENDIAN__
  63675. __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  63676. poly8x16_t __ret;
  63677. __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
  63678. return __ret;
  63679. }
  63680. #else
  63681. __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  63682. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63683. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63684. poly8x16_t __ret;
  63685. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
  63686. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63687. return __ret;
  63688. }
  63689. #endif
  63690. #ifdef __LITTLE_ENDIAN__
  63691. __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  63692. poly64x2_t __ret;
  63693. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  63694. return __ret;
  63695. }
  63696. #else
  63697. __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  63698. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63699. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63700. poly64x2_t __ret;
  63701. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  63702. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63703. return __ret;
  63704. }
  63705. #endif
  63706. #ifdef __LITTLE_ENDIAN__
  63707. __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  63708. poly16x8_t __ret;
  63709. __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
  63710. return __ret;
  63711. }
  63712. #else
  63713. __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  63714. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63715. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63716. poly16x8_t __ret;
  63717. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
  63718. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63719. return __ret;
  63720. }
  63721. #endif
  63722. #ifdef __LITTLE_ENDIAN__
  63723. __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  63724. uint8x16_t __ret;
  63725. __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
  63726. return __ret;
  63727. }
  63728. #else
  63729. __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  63730. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63731. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63732. uint8x16_t __ret;
  63733. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
  63734. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63735. return __ret;
  63736. }
  63737. #endif
  63738. #ifdef __LITTLE_ENDIAN__
  63739. __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  63740. uint32x4_t __ret;
  63741. __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
  63742. return __ret;
  63743. }
  63744. #else
  63745. __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  63746. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63747. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63748. uint32x4_t __ret;
  63749. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
  63750. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63751. return __ret;
  63752. }
  63753. #endif
  63754. #ifdef __LITTLE_ENDIAN__
  63755. __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  63756. uint64x2_t __ret;
  63757. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  63758. return __ret;
  63759. }
  63760. #else
  63761. __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  63762. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63763. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63764. uint64x2_t __ret;
  63765. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  63766. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63767. return __ret;
  63768. }
  63769. #endif
  63770. #ifdef __LITTLE_ENDIAN__
  63771. __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  63772. uint16x8_t __ret;
  63773. __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
  63774. return __ret;
  63775. }
  63776. #else
  63777. __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  63778. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63779. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63780. uint16x8_t __ret;
  63781. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
  63782. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63783. return __ret;
  63784. }
  63785. #endif
  63786. #ifdef __LITTLE_ENDIAN__
  63787. __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
  63788. int8x16_t __ret;
  63789. __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
  63790. return __ret;
  63791. }
  63792. #else
  63793. __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
  63794. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63795. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63796. int8x16_t __ret;
  63797. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
  63798. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  63799. return __ret;
  63800. }
  63801. #endif
  63802. #ifdef __LITTLE_ENDIAN__
  63803. __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
  63804. float64x2_t __ret;
  63805. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  63806. return __ret;
  63807. }
  63808. #else
  63809. __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
  63810. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63811. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63812. float64x2_t __ret;
  63813. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  63814. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63815. return __ret;
  63816. }
  63817. #endif
  63818. #ifdef __LITTLE_ENDIAN__
  63819. __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
  63820. float32x4_t __ret;
  63821. __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
  63822. return __ret;
  63823. }
  63824. #else
  63825. __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
  63826. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63827. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63828. float32x4_t __ret;
  63829. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
  63830. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63831. return __ret;
  63832. }
  63833. #endif
  63834. #ifdef __LITTLE_ENDIAN__
  63835. __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
  63836. int32x4_t __ret;
  63837. __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
  63838. return __ret;
  63839. }
  63840. #else
  63841. __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
  63842. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63843. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63844. int32x4_t __ret;
  63845. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
  63846. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63847. return __ret;
  63848. }
  63849. #endif
  63850. #ifdef __LITTLE_ENDIAN__
  63851. __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
  63852. int64x2_t __ret;
  63853. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  63854. return __ret;
  63855. }
  63856. #else
  63857. __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
  63858. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63859. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63860. int64x2_t __ret;
  63861. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  63862. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63863. return __ret;
  63864. }
  63865. #endif
  63866. #ifdef __LITTLE_ENDIAN__
  63867. __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
  63868. int16x8_t __ret;
  63869. __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
  63870. return __ret;
  63871. }
  63872. #else
  63873. __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
  63874. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63875. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63876. int16x8_t __ret;
  63877. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
  63878. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63879. return __ret;
  63880. }
  63881. #endif
  63882. #ifdef __LITTLE_ENDIAN__
  63883. __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
  63884. uint8x8_t __ret;
  63885. __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
  63886. return __ret;
  63887. }
  63888. #else
  63889. __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
  63890. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63891. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63892. uint8x8_t __ret;
  63893. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
  63894. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63895. return __ret;
  63896. }
  63897. #endif
  63898. #ifdef __LITTLE_ENDIAN__
  63899. __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
  63900. uint32x2_t __ret;
  63901. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  63902. return __ret;
  63903. }
  63904. #else
  63905. __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
  63906. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63907. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63908. uint32x2_t __ret;
  63909. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  63910. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63911. return __ret;
  63912. }
  63913. #endif
  63914. #ifdef __LITTLE_ENDIAN__
  63915. __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
  63916. uint16x4_t __ret;
  63917. __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
  63918. return __ret;
  63919. }
  63920. #else
  63921. __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
  63922. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63923. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63924. uint16x4_t __ret;
  63925. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
  63926. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63927. return __ret;
  63928. }
  63929. #endif
  63930. #ifdef __LITTLE_ENDIAN__
  63931. __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
  63932. int8x8_t __ret;
  63933. __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
  63934. return __ret;
  63935. }
  63936. #else
  63937. __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
  63938. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  63939. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  63940. int8x8_t __ret;
  63941. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
  63942. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  63943. return __ret;
  63944. }
  63945. #endif
  63946. #ifdef __LITTLE_ENDIAN__
  63947. __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
  63948. float32x2_t __ret;
  63949. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  63950. return __ret;
  63951. }
  63952. #else
  63953. __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
  63954. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63955. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63956. float32x2_t __ret;
  63957. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  63958. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63959. return __ret;
  63960. }
  63961. #endif
  63962. #ifdef __LITTLE_ENDIAN__
  63963. __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
  63964. int32x2_t __ret;
  63965. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  63966. return __ret;
  63967. }
  63968. #else
  63969. __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
  63970. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  63971. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  63972. int32x2_t __ret;
  63973. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  63974. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  63975. return __ret;
  63976. }
  63977. #endif
  63978. #ifdef __LITTLE_ENDIAN__
  63979. __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
  63980. int16x4_t __ret;
  63981. __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
  63982. return __ret;
  63983. }
  63984. #else
  63985. __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
  63986. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  63987. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  63988. int16x4_t __ret;
  63989. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
  63990. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  63991. return __ret;
  63992. }
  63993. #endif
  63994. #ifdef __LITTLE_ENDIAN__
  63995. __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
  63996. uint64x1_t __ret;
  63997. __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  63998. return __ret;
  63999. }
  64000. #else
  64001. __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
  64002. uint64x1_t __ret;
  64003. __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  64004. return __ret;
  64005. }
  64006. #endif
  64007. #ifdef __LITTLE_ENDIAN__
  64008. __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
  64009. uint64x2_t __ret;
  64010. __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  64011. return __ret;
  64012. }
  64013. #else
  64014. __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
  64015. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64016. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64017. uint64x2_t __ret;
  64018. __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  64019. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64020. return __ret;
  64021. }
  64022. #endif
  64023. #ifdef __LITTLE_ENDIAN__
  64024. __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  64025. uint64x2_t __ret;
  64026. __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  64027. return __ret;
  64028. }
  64029. #else
  64030. __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
  64031. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64032. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64033. uint64x2_t __ret;
  64034. __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  64035. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64036. return __ret;
  64037. }
  64038. #endif
  64039. #ifdef __LITTLE_ENDIAN__
  64040. __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
  64041. uint64x2_t __ret;
  64042. __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
  64043. return __ret;
  64044. }
  64045. #else
  64046. __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
  64047. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64048. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64049. uint64x2_t __ret;
  64050. __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
  64051. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64052. return __ret;
  64053. }
  64054. #endif
  64055. #ifdef __LITTLE_ENDIAN__
  64056. __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
  64057. uint64x1_t __ret;
  64058. __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  64059. return __ret;
  64060. }
  64061. #else
  64062. __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
  64063. uint64x1_t __ret;
  64064. __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  64065. return __ret;
  64066. }
  64067. #endif
  64068. #ifdef __LITTLE_ENDIAN__
  64069. __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
  64070. uint64x1_t __ret;
  64071. __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  64072. return __ret;
  64073. }
  64074. #else
  64075. __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
  64076. uint64x1_t __ret;
  64077. __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
  64078. return __ret;
  64079. }
  64080. #endif
  64081. #ifdef __LITTLE_ENDIAN__
  64082. __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
  64083. uint64_t __ret;
  64084. __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
  64085. return __ret;
  64086. }
  64087. #else
  64088. __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
  64089. uint64_t __ret;
  64090. __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
  64091. return __ret;
  64092. }
  64093. #endif
  64094. #ifdef __LITTLE_ENDIAN__
  64095. __ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
  64096. int64_t __ret;
  64097. __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
  64098. return __ret;
  64099. }
  64100. #else
  64101. __ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
  64102. int64_t __ret;
  64103. __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
  64104. return __ret;
  64105. }
  64106. #endif
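/* The vuqadd* family below corresponds to the AArch64 SUQADD operation: a
 * signed saturating addition in which the bits of the second operand are
 * treated as an unsigned value.  The scalar forms (vuqaddb_s8, vuqadds_s32,
 * vuqaddd_s64, vuqaddh_s16) come first, followed by the vector forms.
 * A minimal usage sketch with illustrative names:
 *
 *   int8_t acc = 100;
 *   int8_t inc = 100;                      // bit pattern read as unsigned 100
 *   int8_t sum = vuqaddb_s8(acc, inc);     // 200 saturates to 127
 */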
  64107. #ifdef __LITTLE_ENDIAN__
  64108. __ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
  64109. int8_t __ret;
  64110. __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
  64111. return __ret;
  64112. }
  64113. #else
  64114. __ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
  64115. int8_t __ret;
  64116. __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
  64117. return __ret;
  64118. }
  64119. #endif
  64120. #ifdef __LITTLE_ENDIAN__
  64121. __ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
  64122. int32_t __ret;
  64123. __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
  64124. return __ret;
  64125. }
  64126. #else
  64127. __ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
  64128. int32_t __ret;
  64129. __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
  64130. return __ret;
  64131. }
  64132. #endif
  64133. #ifdef __LITTLE_ENDIAN__
  64134. __ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
  64135. int64_t __ret;
  64136. __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
  64137. return __ret;
  64138. }
  64139. #else
  64140. __ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
  64141. int64_t __ret;
  64142. __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
  64143. return __ret;
  64144. }
  64145. #endif
  64146. #ifdef __LITTLE_ENDIAN__
  64147. __ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
  64148. int16_t __ret;
  64149. __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
  64150. return __ret;
  64151. }
  64152. #else
  64153. __ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
  64154. int16_t __ret;
  64155. __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
  64156. return __ret;
  64157. }
  64158. #endif
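/* Vector forms of the same saturating accumulate: vuqaddq_* for 128-bit
 * vectors, vuqadd_* for 64-bit vectors. */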
  64159. #ifdef __LITTLE_ENDIAN__
  64160. __ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  64161. int8x16_t __ret;
  64162. __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
  64163. return __ret;
  64164. }
  64165. #else
  64166. __ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
  64167. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64168. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64169. int8x16_t __ret;
  64170. __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
  64171. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64172. return __ret;
  64173. }
  64174. #endif
  64175. #ifdef __LITTLE_ENDIAN__
  64176. __ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  64177. int32x4_t __ret;
  64178. __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
  64179. return __ret;
  64180. }
  64181. #else
  64182. __ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
  64183. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64184. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64185. int32x4_t __ret;
  64186. __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
  64187. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64188. return __ret;
  64189. }
  64190. #endif
  64191. #ifdef __LITTLE_ENDIAN__
  64192. __ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
  64193. int64x2_t __ret;
  64194. __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
  64195. return __ret;
  64196. }
  64197. #else
  64198. __ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
  64199. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64200. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64201. int64x2_t __ret;
  64202. __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
  64203. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64204. return __ret;
  64205. }
  64206. #endif
  64207. #ifdef __LITTLE_ENDIAN__
  64208. __ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  64209. int16x8_t __ret;
  64210. __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
  64211. return __ret;
  64212. }
  64213. #else
  64214. __ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
  64215. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64216. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64217. int16x8_t __ret;
  64218. __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
  64219. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64220. return __ret;
  64221. }
  64222. #endif
  64223. #ifdef __LITTLE_ENDIAN__
  64224. __ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
  64225. int8x8_t __ret;
  64226. __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
  64227. return __ret;
  64228. }
  64229. #else
  64230. __ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
  64231. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64232. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64233. int8x8_t __ret;
  64234. __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
  64235. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64236. return __ret;
  64237. }
  64238. #endif
  64239. #ifdef __LITTLE_ENDIAN__
  64240. __ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
  64241. int32x2_t __ret;
  64242. __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
  64243. return __ret;
  64244. }
  64245. #else
  64246. __ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
  64247. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64248. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64249. int32x2_t __ret;
  64250. __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
  64251. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64252. return __ret;
  64253. }
  64254. #endif
  64255. #ifdef __LITTLE_ENDIAN__
  64256. __ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
  64257. int64x1_t __ret;
  64258. __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  64259. return __ret;
  64260. }
  64261. #else
  64262. __ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
  64263. int64x1_t __ret;
  64264. __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
  64265. return __ret;
  64266. }
  64267. #endif
  64268. #ifdef __LITTLE_ENDIAN__
  64269. __ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
  64270. int16x4_t __ret;
  64271. __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
  64272. return __ret;
  64273. }
  64274. #else
  64275. __ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
  64276. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64277. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64278. int16x4_t __ret;
  64279. __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
  64280. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64281. return __ret;
  64282. }
  64283. #endif
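/* vuzp1 / vuzp1q (UZP1): de-interleave, keeping the even-indexed elements of
 * the concatenated pair (__p0, __p1), i.e. elements 0, 2, 4, ...
 * A minimal usage sketch with illustrative names:
 *
 *   // with a = {0, 1, 2, 3} and b = {4, 5, 6, 7} (element 0 listed first):
 *   uint32x4_t even = vuzp1q_u32(a, b);    // -> {0, 2, 4, 6}
 */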
  64284. #ifdef __LITTLE_ENDIAN__
  64285. __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
  64286. poly8x8_t __ret;
  64287. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
  64288. return __ret;
  64289. }
  64290. #else
  64291. __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
  64292. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64293. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64294. poly8x8_t __ret;
  64295. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
  64296. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64297. return __ret;
  64298. }
  64299. #endif
  64300. #ifdef __LITTLE_ENDIAN__
  64301. __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
  64302. poly16x4_t __ret;
  64303. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
  64304. return __ret;
  64305. }
  64306. #else
  64307. __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
  64308. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64309. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64310. poly16x4_t __ret;
  64311. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
  64312. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64313. return __ret;
  64314. }
  64315. #endif
  64316. #ifdef __LITTLE_ENDIAN__
  64317. __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  64318. poly8x16_t __ret;
  64319. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
  64320. return __ret;
  64321. }
  64322. #else
  64323. __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  64324. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64325. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64326. poly8x16_t __ret;
  64327. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
  64328. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64329. return __ret;
  64330. }
  64331. #endif
  64332. #ifdef __LITTLE_ENDIAN__
  64333. __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  64334. poly64x2_t __ret;
  64335. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  64336. return __ret;
  64337. }
  64338. #else
  64339. __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  64340. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64341. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64342. poly64x2_t __ret;
  64343. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  64344. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64345. return __ret;
  64346. }
  64347. #endif
  64348. #ifdef __LITTLE_ENDIAN__
  64349. __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  64350. poly16x8_t __ret;
  64351. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
  64352. return __ret;
  64353. }
  64354. #else
  64355. __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  64356. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64357. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64358. poly16x8_t __ret;
  64359. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
  64360. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64361. return __ret;
  64362. }
  64363. #endif
  64364. #ifdef __LITTLE_ENDIAN__
  64365. __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  64366. uint8x16_t __ret;
  64367. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
  64368. return __ret;
  64369. }
  64370. #else
  64371. __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  64372. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64373. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64374. uint8x16_t __ret;
  64375. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
  64376. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64377. return __ret;
  64378. }
  64379. #endif
  64380. #ifdef __LITTLE_ENDIAN__
  64381. __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  64382. uint32x4_t __ret;
  64383. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
  64384. return __ret;
  64385. }
  64386. #else
  64387. __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  64388. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64389. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64390. uint32x4_t __ret;
  64391. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
  64392. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64393. return __ret;
  64394. }
  64395. #endif
  64396. #ifdef __LITTLE_ENDIAN__
  64397. __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  64398. uint64x2_t __ret;
  64399. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  64400. return __ret;
  64401. }
  64402. #else
  64403. __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  64404. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64405. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64406. uint64x2_t __ret;
  64407. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  64408. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64409. return __ret;
  64410. }
  64411. #endif
  64412. #ifdef __LITTLE_ENDIAN__
  64413. __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  64414. uint16x8_t __ret;
  64415. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
  64416. return __ret;
  64417. }
  64418. #else
  64419. __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  64420. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64421. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64422. uint16x8_t __ret;
  64423. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
  64424. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64425. return __ret;
  64426. }
  64427. #endif
  64428. #ifdef __LITTLE_ENDIAN__
  64429. __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
  64430. int8x16_t __ret;
  64431. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
  64432. return __ret;
  64433. }
  64434. #else
  64435. __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
  64436. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64437. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64438. int8x16_t __ret;
  64439. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
  64440. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64441. return __ret;
  64442. }
  64443. #endif
  64444. #ifdef __LITTLE_ENDIAN__
  64445. __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
  64446. float64x2_t __ret;
  64447. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  64448. return __ret;
  64449. }
  64450. #else
  64451. __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
  64452. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64453. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64454. float64x2_t __ret;
  64455. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  64456. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64457. return __ret;
  64458. }
  64459. #endif
  64460. #ifdef __LITTLE_ENDIAN__
  64461. __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
  64462. float32x4_t __ret;
  64463. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
  64464. return __ret;
  64465. }
  64466. #else
  64467. __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
  64468. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64469. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64470. float32x4_t __ret;
  64471. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
  64472. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64473. return __ret;
  64474. }
  64475. #endif
  64476. #ifdef __LITTLE_ENDIAN__
  64477. __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
  64478. int32x4_t __ret;
  64479. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
  64480. return __ret;
  64481. }
  64482. #else
  64483. __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
  64484. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64485. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64486. int32x4_t __ret;
  64487. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
  64488. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64489. return __ret;
  64490. }
  64491. #endif
  64492. #ifdef __LITTLE_ENDIAN__
  64493. __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
  64494. int64x2_t __ret;
  64495. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  64496. return __ret;
  64497. }
  64498. #else
  64499. __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
  64500. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64501. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64502. int64x2_t __ret;
  64503. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  64504. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64505. return __ret;
  64506. }
  64507. #endif
  64508. #ifdef __LITTLE_ENDIAN__
  64509. __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
  64510. int16x8_t __ret;
  64511. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
  64512. return __ret;
  64513. }
  64514. #else
  64515. __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
  64516. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64517. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64518. int16x8_t __ret;
  64519. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
  64520. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64521. return __ret;
  64522. }
  64523. #endif
  64524. #ifdef __LITTLE_ENDIAN__
  64525. __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
  64526. uint8x8_t __ret;
  64527. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
  64528. return __ret;
  64529. }
  64530. #else
  64531. __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
  64532. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64533. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64534. uint8x8_t __ret;
  64535. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
  64536. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64537. return __ret;
  64538. }
  64539. #endif
  64540. #ifdef __LITTLE_ENDIAN__
  64541. __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
  64542. uint32x2_t __ret;
  64543. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  64544. return __ret;
  64545. }
  64546. #else
  64547. __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
  64548. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64549. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64550. uint32x2_t __ret;
  64551. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  64552. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64553. return __ret;
  64554. }
  64555. #endif
  64556. #ifdef __LITTLE_ENDIAN__
  64557. __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
  64558. uint16x4_t __ret;
  64559. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
  64560. return __ret;
  64561. }
  64562. #else
  64563. __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
  64564. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64565. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64566. uint16x4_t __ret;
  64567. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
  64568. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64569. return __ret;
  64570. }
  64571. #endif
  64572. #ifdef __LITTLE_ENDIAN__
  64573. __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
  64574. int8x8_t __ret;
  64575. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
  64576. return __ret;
  64577. }
  64578. #else
  64579. __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
  64580. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64581. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64582. int8x8_t __ret;
  64583. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
  64584. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64585. return __ret;
  64586. }
  64587. #endif
  64588. #ifdef __LITTLE_ENDIAN__
  64589. __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
  64590. float32x2_t __ret;
  64591. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  64592. return __ret;
  64593. }
  64594. #else
  64595. __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
  64596. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64597. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64598. float32x2_t __ret;
  64599. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  64600. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64601. return __ret;
  64602. }
  64603. #endif
  64604. #ifdef __LITTLE_ENDIAN__
  64605. __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
  64606. int32x2_t __ret;
  64607. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  64608. return __ret;
  64609. }
  64610. #else
  64611. __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
  64612. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64613. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64614. int32x2_t __ret;
  64615. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  64616. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64617. return __ret;
  64618. }
  64619. #endif
  64620. #ifdef __LITTLE_ENDIAN__
  64621. __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
  64622. int16x4_t __ret;
  64623. __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
  64624. return __ret;
  64625. }
  64626. #else
  64627. __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
  64628. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64629. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64630. int16x4_t __ret;
  64631. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
  64632. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64633. return __ret;
  64634. }
  64635. #endif
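/* vuzp2 / vuzp2q (UZP2): the odd-indexed counterpart of vuzp1, keeping
 * elements 1, 3, 5, ... of the concatenated pair.
 *
 *   // with a = {0, 1, 2, 3} and b = {4, 5, 6, 7} (element 0 listed first):
 *   uint32x4_t odd = vuzp2q_u32(a, b);     // -> {1, 3, 5, 7}
 */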
  64636. #ifdef __LITTLE_ENDIAN__
  64637. __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
  64638. poly8x8_t __ret;
  64639. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
  64640. return __ret;
  64641. }
  64642. #else
  64643. __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
  64644. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64645. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64646. poly8x8_t __ret;
  64647. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
  64648. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64649. return __ret;
  64650. }
  64651. #endif
  64652. #ifdef __LITTLE_ENDIAN__
  64653. __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
  64654. poly16x4_t __ret;
  64655. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
  64656. return __ret;
  64657. }
  64658. #else
  64659. __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
  64660. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64661. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64662. poly16x4_t __ret;
  64663. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
  64664. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64665. return __ret;
  64666. }
  64667. #endif
  64668. #ifdef __LITTLE_ENDIAN__
  64669. __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  64670. poly8x16_t __ret;
  64671. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
  64672. return __ret;
  64673. }
  64674. #else
  64675. __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  64676. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64677. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64678. poly8x16_t __ret;
  64679. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
  64680. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64681. return __ret;
  64682. }
  64683. #endif
  64684. #ifdef __LITTLE_ENDIAN__
  64685. __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  64686. poly64x2_t __ret;
  64687. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  64688. return __ret;
  64689. }
  64690. #else
  64691. __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  64692. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64693. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64694. poly64x2_t __ret;
  64695. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  64696. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64697. return __ret;
  64698. }
  64699. #endif
  64700. #ifdef __LITTLE_ENDIAN__
  64701. __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  64702. poly16x8_t __ret;
  64703. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
  64704. return __ret;
  64705. }
  64706. #else
  64707. __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  64708. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64709. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64710. poly16x8_t __ret;
  64711. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
  64712. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64713. return __ret;
  64714. }
  64715. #endif
  64716. #ifdef __LITTLE_ENDIAN__
  64717. __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  64718. uint8x16_t __ret;
  64719. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
  64720. return __ret;
  64721. }
  64722. #else
  64723. __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  64724. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64725. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64726. uint8x16_t __ret;
  64727. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
  64728. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64729. return __ret;
  64730. }
  64731. #endif
  64732. #ifdef __LITTLE_ENDIAN__
  64733. __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  64734. uint32x4_t __ret;
  64735. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
  64736. return __ret;
  64737. }
  64738. #else
  64739. __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  64740. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64741. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64742. uint32x4_t __ret;
  64743. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
  64744. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64745. return __ret;
  64746. }
  64747. #endif
  64748. #ifdef __LITTLE_ENDIAN__
  64749. __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  64750. uint64x2_t __ret;
  64751. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  64752. return __ret;
  64753. }
  64754. #else
  64755. __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  64756. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64757. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64758. uint64x2_t __ret;
  64759. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  64760. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64761. return __ret;
  64762. }
  64763. #endif
  64764. #ifdef __LITTLE_ENDIAN__
  64765. __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  64766. uint16x8_t __ret;
  64767. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
  64768. return __ret;
  64769. }
  64770. #else
  64771. __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  64772. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64773. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64774. uint16x8_t __ret;
  64775. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
  64776. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64777. return __ret;
  64778. }
  64779. #endif
  64780. #ifdef __LITTLE_ENDIAN__
  64781. __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
  64782. int8x16_t __ret;
  64783. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
  64784. return __ret;
  64785. }
  64786. #else
  64787. __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
  64788. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64789. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64790. int8x16_t __ret;
  64791. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
  64792. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  64793. return __ret;
  64794. }
  64795. #endif
  64796. #ifdef __LITTLE_ENDIAN__
  64797. __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
  64798. float64x2_t __ret;
  64799. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  64800. return __ret;
  64801. }
  64802. #else
  64803. __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
  64804. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64805. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64806. float64x2_t __ret;
  64807. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  64808. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64809. return __ret;
  64810. }
  64811. #endif
  64812. #ifdef __LITTLE_ENDIAN__
  64813. __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
  64814. float32x4_t __ret;
  64815. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
  64816. return __ret;
  64817. }
  64818. #else
  64819. __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
  64820. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64821. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64822. float32x4_t __ret;
  64823. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
  64824. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64825. return __ret;
  64826. }
  64827. #endif
  64828. #ifdef __LITTLE_ENDIAN__
  64829. __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
  64830. int32x4_t __ret;
  64831. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
  64832. return __ret;
  64833. }
  64834. #else
  64835. __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
  64836. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64837. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64838. int32x4_t __ret;
  64839. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
  64840. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64841. return __ret;
  64842. }
  64843. #endif
  64844. #ifdef __LITTLE_ENDIAN__
  64845. __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
  64846. int64x2_t __ret;
  64847. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  64848. return __ret;
  64849. }
  64850. #else
  64851. __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
  64852. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64853. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64854. int64x2_t __ret;
  64855. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  64856. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64857. return __ret;
  64858. }
  64859. #endif
  64860. #ifdef __LITTLE_ENDIAN__
  64861. __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
  64862. int16x8_t __ret;
  64863. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
  64864. return __ret;
  64865. }
  64866. #else
  64867. __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
  64868. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64869. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64870. int16x8_t __ret;
  64871. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
  64872. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64873. return __ret;
  64874. }
  64875. #endif
  64876. #ifdef __LITTLE_ENDIAN__
  64877. __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
  64878. uint8x8_t __ret;
  64879. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
  64880. return __ret;
  64881. }
  64882. #else
  64883. __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
  64884. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64885. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64886. uint8x8_t __ret;
  64887. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
  64888. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64889. return __ret;
  64890. }
  64891. #endif
  64892. #ifdef __LITTLE_ENDIAN__
  64893. __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
  64894. uint32x2_t __ret;
  64895. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  64896. return __ret;
  64897. }
  64898. #else
  64899. __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
  64900. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64901. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64902. uint32x2_t __ret;
  64903. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  64904. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64905. return __ret;
  64906. }
  64907. #endif
  64908. #ifdef __LITTLE_ENDIAN__
  64909. __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
  64910. uint16x4_t __ret;
  64911. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
  64912. return __ret;
  64913. }
  64914. #else
  64915. __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
  64916. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64917. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64918. uint16x4_t __ret;
  64919. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
  64920. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64921. return __ret;
  64922. }
  64923. #endif
  64924. #ifdef __LITTLE_ENDIAN__
  64925. __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
  64926. int8x8_t __ret;
  64927. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
  64928. return __ret;
  64929. }
  64930. #else
  64931. __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
  64932. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64933. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64934. int8x8_t __ret;
  64935. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
  64936. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  64937. return __ret;
  64938. }
  64939. #endif
  64940. #ifdef __LITTLE_ENDIAN__
  64941. __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
  64942. float32x2_t __ret;
  64943. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  64944. return __ret;
  64945. }
  64946. #else
  64947. __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
  64948. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64949. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64950. float32x2_t __ret;
  64951. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  64952. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64953. return __ret;
  64954. }
  64955. #endif
  64956. #ifdef __LITTLE_ENDIAN__
  64957. __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
  64958. int32x2_t __ret;
  64959. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  64960. return __ret;
  64961. }
  64962. #else
  64963. __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
  64964. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  64965. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  64966. int32x2_t __ret;
  64967. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  64968. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  64969. return __ret;
  64970. }
  64971. #endif
  64972. #ifdef __LITTLE_ENDIAN__
  64973. __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
  64974. int16x4_t __ret;
  64975. __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
  64976. return __ret;
  64977. }
  64978. #else
  64979. __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
  64980. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  64981. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  64982. int16x4_t __ret;
  64983. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
  64984. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  64985. return __ret;
  64986. }
  64987. #endif
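/* vzip1 / vzip1q (ZIP1): interleave the low halves of the two operands,
 * producing {__p0[0], __p1[0], __p0[1], __p1[1], ...}.
 *
 *   // with a = {0, 1, 2, 3} and b = {4, 5, 6, 7} (element 0 listed first):
 *   uint32x4_t lo = vzip1q_u32(a, b);      // -> {0, 4, 1, 5}
 */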
  64988. #ifdef __LITTLE_ENDIAN__
  64989. __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
  64990. poly8x8_t __ret;
  64991. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  64992. return __ret;
  64993. }
  64994. #else
  64995. __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
  64996. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  64997. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  64998. poly8x8_t __ret;
  64999. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  65000. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65001. return __ret;
  65002. }
  65003. #endif
  65004. #ifdef __LITTLE_ENDIAN__
  65005. __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
  65006. poly16x4_t __ret;
  65007. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  65008. return __ret;
  65009. }
  65010. #else
  65011. __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
  65012. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65013. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65014. poly16x4_t __ret;
  65015. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  65016. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65017. return __ret;
  65018. }
  65019. #endif
  65020. #ifdef __LITTLE_ENDIAN__
  65021. __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  65022. poly8x16_t __ret;
  65023. __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
  65024. return __ret;
  65025. }
  65026. #else
  65027. __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  65028. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65029. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65030. poly8x16_t __ret;
  65031. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
  65032. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65033. return __ret;
  65034. }
  65035. #endif
  65036. #ifdef __LITTLE_ENDIAN__
  65037. __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  65038. poly64x2_t __ret;
  65039. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  65040. return __ret;
  65041. }
  65042. #else
  65043. __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  65044. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65045. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65046. poly64x2_t __ret;
  65047. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  65048. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65049. return __ret;
  65050. }
  65051. #endif
  65052. #ifdef __LITTLE_ENDIAN__
  65053. __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  65054. poly16x8_t __ret;
  65055. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  65056. return __ret;
  65057. }
  65058. #else
  65059. __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  65060. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65061. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65062. poly16x8_t __ret;
  65063. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  65064. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65065. return __ret;
  65066. }
  65067. #endif
  65068. #ifdef __LITTLE_ENDIAN__
  65069. __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  65070. uint8x16_t __ret;
  65071. __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
  65072. return __ret;
  65073. }
  65074. #else
  65075. __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  65076. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65077. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65078. uint8x16_t __ret;
  65079. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
  65080. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65081. return __ret;
  65082. }
  65083. #endif
  65084. #ifdef __LITTLE_ENDIAN__
  65085. __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  65086. uint32x4_t __ret;
  65087. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  65088. return __ret;
  65089. }
  65090. #else
  65091. __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  65092. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65093. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65094. uint32x4_t __ret;
  65095. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  65096. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65097. return __ret;
  65098. }
  65099. #endif
  65100. #ifdef __LITTLE_ENDIAN__
  65101. __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  65102. uint64x2_t __ret;
  65103. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  65104. return __ret;
  65105. }
  65106. #else
  65107. __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  65108. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65109. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65110. uint64x2_t __ret;
  65111. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  65112. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65113. return __ret;
  65114. }
  65115. #endif
  65116. #ifdef __LITTLE_ENDIAN__
  65117. __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  65118. uint16x8_t __ret;
  65119. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  65120. return __ret;
  65121. }
  65122. #else
  65123. __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  65124. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65125. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65126. uint16x8_t __ret;
  65127. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  65128. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65129. return __ret;
  65130. }
  65131. #endif
  65132. #ifdef __LITTLE_ENDIAN__
  65133. __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
  65134. int8x16_t __ret;
  65135. __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
  65136. return __ret;
  65137. }
  65138. #else
  65139. __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
  65140. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65141. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65142. int8x16_t __ret;
  65143. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
  65144. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65145. return __ret;
  65146. }
  65147. #endif
  65148. #ifdef __LITTLE_ENDIAN__
  65149. __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
  65150. float64x2_t __ret;
  65151. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  65152. return __ret;
  65153. }
  65154. #else
  65155. __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
  65156. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65157. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65158. float64x2_t __ret;
  65159. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  65160. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65161. return __ret;
  65162. }
  65163. #endif
  65164. #ifdef __LITTLE_ENDIAN__
  65165. __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
  65166. float32x4_t __ret;
  65167. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  65168. return __ret;
  65169. }
  65170. #else
  65171. __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
  65172. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65173. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65174. float32x4_t __ret;
  65175. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  65176. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65177. return __ret;
  65178. }
  65179. #endif
  65180. #ifdef __LITTLE_ENDIAN__
  65181. __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
  65182. int32x4_t __ret;
  65183. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  65184. return __ret;
  65185. }
  65186. #else
  65187. __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
  65188. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65189. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65190. int32x4_t __ret;
  65191. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  65192. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65193. return __ret;
  65194. }
  65195. #endif
  65196. #ifdef __LITTLE_ENDIAN__
  65197. __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
  65198. int64x2_t __ret;
  65199. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  65200. return __ret;
  65201. }
  65202. #else
  65203. __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
  65204. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65205. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65206. int64x2_t __ret;
  65207. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  65208. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65209. return __ret;
  65210. }
  65211. #endif
  65212. #ifdef __LITTLE_ENDIAN__
  65213. __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
  65214. int16x8_t __ret;
  65215. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  65216. return __ret;
  65217. }
  65218. #else
  65219. __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
  65220. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65221. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65222. int16x8_t __ret;
  65223. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  65224. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65225. return __ret;
  65226. }
  65227. #endif
  65228. #ifdef __LITTLE_ENDIAN__
  65229. __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
  65230. uint8x8_t __ret;
  65231. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  65232. return __ret;
  65233. }
  65234. #else
  65235. __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
  65236. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65237. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65238. uint8x8_t __ret;
  65239. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  65240. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65241. return __ret;
  65242. }
  65243. #endif
  65244. #ifdef __LITTLE_ENDIAN__
  65245. __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
  65246. uint32x2_t __ret;
  65247. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  65248. return __ret;
  65249. }
  65250. #else
  65251. __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
  65252. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65253. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65254. uint32x2_t __ret;
  65255. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  65256. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65257. return __ret;
  65258. }
  65259. #endif
  65260. #ifdef __LITTLE_ENDIAN__
  65261. __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
  65262. uint16x4_t __ret;
  65263. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  65264. return __ret;
  65265. }
  65266. #else
  65267. __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
  65268. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65269. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65270. uint16x4_t __ret;
  65271. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  65272. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65273. return __ret;
  65274. }
  65275. #endif
  65276. #ifdef __LITTLE_ENDIAN__
  65277. __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
  65278. int8x8_t __ret;
  65279. __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
  65280. return __ret;
  65281. }
  65282. #else
  65283. __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
  65284. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65285. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65286. int8x8_t __ret;
  65287. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
  65288. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65289. return __ret;
  65290. }
  65291. #endif
  65292. #ifdef __LITTLE_ENDIAN__
  65293. __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
  65294. float32x2_t __ret;
  65295. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  65296. return __ret;
  65297. }
  65298. #else
  65299. __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
  65300. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65301. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65302. float32x2_t __ret;
  65303. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  65304. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65305. return __ret;
  65306. }
  65307. #endif
  65308. #ifdef __LITTLE_ENDIAN__
  65309. __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
  65310. int32x2_t __ret;
  65311. __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
  65312. return __ret;
  65313. }
  65314. #else
  65315. __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
  65316. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65317. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65318. int32x2_t __ret;
  65319. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
  65320. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65321. return __ret;
  65322. }
  65323. #endif
  65324. #ifdef __LITTLE_ENDIAN__
  65325. __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
  65326. int16x4_t __ret;
  65327. __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
  65328. return __ret;
  65329. }
  65330. #else
  65331. __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
  65332. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65333. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65334. int16x4_t __ret;
  65335. __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
  65336. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65337. return __ret;
  65338. }
  65339. #endif
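/* vzip2 family: interleave the high halves of the two inputs (the shuffle indices
   start at the midpoint of each source vector). Big-endian variants follow the
   usual reverse-shuffle-reverse pattern. */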
  65340. #ifdef __LITTLE_ENDIAN__
  65341. __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
  65342. poly8x8_t __ret;
  65343. __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  65344. return __ret;
  65345. }
  65346. #else
  65347. __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
  65348. poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65349. poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65350. poly8x8_t __ret;
  65351. __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  65352. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65353. return __ret;
  65354. }
  65355. #endif
  65356. #ifdef __LITTLE_ENDIAN__
  65357. __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
  65358. poly16x4_t __ret;
  65359. __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
  65360. return __ret;
  65361. }
  65362. #else
  65363. __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
  65364. poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65365. poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65366. poly16x4_t __ret;
  65367. __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
  65368. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65369. return __ret;
  65370. }
  65371. #endif
  65372. #ifdef __LITTLE_ENDIAN__
  65373. __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  65374. poly8x16_t __ret;
  65375. __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
  65376. return __ret;
  65377. }
  65378. #else
  65379. __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
  65380. poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65381. poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65382. poly8x16_t __ret;
  65383. __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
  65384. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65385. return __ret;
  65386. }
  65387. #endif
  65388. #ifdef __LITTLE_ENDIAN__
  65389. __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  65390. poly64x2_t __ret;
  65391. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  65392. return __ret;
  65393. }
  65394. #else
  65395. __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
  65396. poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65397. poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65398. poly64x2_t __ret;
  65399. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  65400. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65401. return __ret;
  65402. }
  65403. #endif
  65404. #ifdef __LITTLE_ENDIAN__
  65405. __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  65406. poly16x8_t __ret;
  65407. __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  65408. return __ret;
  65409. }
  65410. #else
  65411. __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
  65412. poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65413. poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65414. poly16x8_t __ret;
  65415. __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  65416. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65417. return __ret;
  65418. }
  65419. #endif
  65420. #ifdef __LITTLE_ENDIAN__
  65421. __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  65422. uint8x16_t __ret;
  65423. __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
  65424. return __ret;
  65425. }
  65426. #else
  65427. __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
  65428. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65429. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65430. uint8x16_t __ret;
  65431. __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
  65432. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65433. return __ret;
  65434. }
  65435. #endif
  65436. #ifdef __LITTLE_ENDIAN__
  65437. __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  65438. uint32x4_t __ret;
  65439. __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
  65440. return __ret;
  65441. }
  65442. #else
  65443. __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
  65444. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65445. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65446. uint32x4_t __ret;
  65447. __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
  65448. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65449. return __ret;
  65450. }
  65451. #endif
  65452. #ifdef __LITTLE_ENDIAN__
  65453. __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  65454. uint64x2_t __ret;
  65455. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  65456. return __ret;
  65457. }
  65458. #else
  65459. __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
  65460. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65461. uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65462. uint64x2_t __ret;
  65463. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  65464. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65465. return __ret;
  65466. }
  65467. #endif
  65468. #ifdef __LITTLE_ENDIAN__
  65469. __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  65470. uint16x8_t __ret;
  65471. __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  65472. return __ret;
  65473. }
  65474. #else
  65475. __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
  65476. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65477. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65478. uint16x8_t __ret;
  65479. __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  65480. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65481. return __ret;
  65482. }
  65483. #endif
  65484. #ifdef __LITTLE_ENDIAN__
  65485. __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
  65486. int8x16_t __ret;
  65487. __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
  65488. return __ret;
  65489. }
  65490. #else
  65491. __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
  65492. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65493. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65494. int8x16_t __ret;
  65495. __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
  65496. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65497. return __ret;
  65498. }
  65499. #endif
  65500. #ifdef __LITTLE_ENDIAN__
  65501. __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
  65502. float64x2_t __ret;
  65503. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  65504. return __ret;
  65505. }
  65506. #else
  65507. __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
  65508. float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65509. float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65510. float64x2_t __ret;
  65511. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  65512. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65513. return __ret;
  65514. }
  65515. #endif
  65516. #ifdef __LITTLE_ENDIAN__
  65517. __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
  65518. float32x4_t __ret;
  65519. __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
  65520. return __ret;
  65521. }
  65522. #else
  65523. __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
  65524. float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65525. float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65526. float32x4_t __ret;
  65527. __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
  65528. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65529. return __ret;
  65530. }
  65531. #endif
  65532. #ifdef __LITTLE_ENDIAN__
  65533. __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
  65534. int32x4_t __ret;
  65535. __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
  65536. return __ret;
  65537. }
  65538. #else
  65539. __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
  65540. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65541. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65542. int32x4_t __ret;
  65543. __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
  65544. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65545. return __ret;
  65546. }
  65547. #endif
  65548. #ifdef __LITTLE_ENDIAN__
  65549. __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
  65550. int64x2_t __ret;
  65551. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  65552. return __ret;
  65553. }
  65554. #else
  65555. __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
  65556. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65557. int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65558. int64x2_t __ret;
  65559. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  65560. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65561. return __ret;
  65562. }
  65563. #endif
  65564. #ifdef __LITTLE_ENDIAN__
  65565. __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
  65566. int16x8_t __ret;
  65567. __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  65568. return __ret;
  65569. }
  65570. #else
  65571. __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
  65572. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65573. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65574. int16x8_t __ret;
  65575. __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  65576. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65577. return __ret;
  65578. }
  65579. #endif
  65580. #ifdef __LITTLE_ENDIAN__
  65581. __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
  65582. uint8x8_t __ret;
  65583. __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  65584. return __ret;
  65585. }
  65586. #else
  65587. __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
  65588. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65589. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65590. uint8x8_t __ret;
  65591. __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  65592. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65593. return __ret;
  65594. }
  65595. #endif
  65596. #ifdef __LITTLE_ENDIAN__
  65597. __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
  65598. uint32x2_t __ret;
  65599. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  65600. return __ret;
  65601. }
  65602. #else
  65603. __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
  65604. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65605. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65606. uint32x2_t __ret;
  65607. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  65608. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65609. return __ret;
  65610. }
  65611. #endif
  65612. #ifdef __LITTLE_ENDIAN__
  65613. __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
  65614. uint16x4_t __ret;
  65615. __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
  65616. return __ret;
  65617. }
  65618. #else
  65619. __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
  65620. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65621. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65622. uint16x4_t __ret;
  65623. __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
  65624. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65625. return __ret;
  65626. }
  65627. #endif
  65628. #ifdef __LITTLE_ENDIAN__
  65629. __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
  65630. int8x8_t __ret;
  65631. __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
  65632. return __ret;
  65633. }
  65634. #else
  65635. __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
  65636. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65637. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65638. int8x8_t __ret;
  65639. __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
  65640. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65641. return __ret;
  65642. }
  65643. #endif
  65644. #ifdef __LITTLE_ENDIAN__
  65645. __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
  65646. float32x2_t __ret;
  65647. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  65648. return __ret;
  65649. }
  65650. #else
  65651. __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
  65652. float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65653. float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65654. float32x2_t __ret;
  65655. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  65656. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65657. return __ret;
  65658. }
  65659. #endif
  65660. #ifdef __LITTLE_ENDIAN__
  65661. __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
  65662. int32x2_t __ret;
  65663. __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
  65664. return __ret;
  65665. }
  65666. #else
  65667. __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
  65668. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65669. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65670. int32x2_t __ret;
  65671. __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
  65672. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65673. return __ret;
  65674. }
  65675. #endif
  65676. #ifdef __LITTLE_ENDIAN__
  65677. __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
  65678. int16x4_t __ret;
  65679. __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
  65680. return __ret;
  65681. }
  65682. #else
  65683. __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
  65684. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65685. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65686. int16x4_t __ret;
  65687. __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
  65688. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65689. return __ret;
  65690. }
  65691. #endif
  65692. #endif
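/* vaba/vabaq: absolute difference and accumulate, i.e. __p0 + |__p1 - __p2| per
   lane, built on vabd/vabdq. Big-endian variants call the __noswap_ helpers on
   pre-reversed operands and reverse the result back. */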
  65693. #ifdef __LITTLE_ENDIAN__
  65694. __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  65695. uint8x16_t __ret;
  65696. __ret = __p0 + vabdq_u8(__p1, __p2);
  65697. return __ret;
  65698. }
  65699. #else
  65700. __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  65701. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65702. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65703. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65704. uint8x16_t __ret;
  65705. __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
  65706. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65707. return __ret;
  65708. }
  65709. #endif
  65710. #ifdef __LITTLE_ENDIAN__
  65711. __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  65712. uint32x4_t __ret;
  65713. __ret = __p0 + vabdq_u32(__p1, __p2);
  65714. return __ret;
  65715. }
  65716. #else
  65717. __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  65718. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65719. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65720. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  65721. uint32x4_t __ret;
  65722. __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
  65723. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65724. return __ret;
  65725. }
  65726. #endif
  65727. #ifdef __LITTLE_ENDIAN__
  65728. __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  65729. uint16x8_t __ret;
  65730. __ret = __p0 + vabdq_u16(__p1, __p2);
  65731. return __ret;
  65732. }
  65733. #else
  65734. __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  65735. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65736. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65737. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  65738. uint16x8_t __ret;
  65739. __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
  65740. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65741. return __ret;
  65742. }
  65743. #endif
  65744. #ifdef __LITTLE_ENDIAN__
  65745. __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  65746. int8x16_t __ret;
  65747. __ret = __p0 + vabdq_s8(__p1, __p2);
  65748. return __ret;
  65749. }
  65750. #else
  65751. __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
  65752. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65753. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65754. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65755. int8x16_t __ret;
  65756. __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
  65757. __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  65758. return __ret;
  65759. }
  65760. #endif
  65761. #ifdef __LITTLE_ENDIAN__
  65762. __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  65763. int32x4_t __ret;
  65764. __ret = __p0 + vabdq_s32(__p1, __p2);
  65765. return __ret;
  65766. }
  65767. #else
  65768. __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
  65769. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65770. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65771. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  65772. int32x4_t __ret;
  65773. __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
  65774. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65775. return __ret;
  65776. }
  65777. #endif
  65778. #ifdef __LITTLE_ENDIAN__
  65779. __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  65780. int16x8_t __ret;
  65781. __ret = __p0 + vabdq_s16(__p1, __p2);
  65782. return __ret;
  65783. }
  65784. #else
  65785. __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
  65786. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65787. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65788. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  65789. int16x8_t __ret;
  65790. __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
  65791. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65792. return __ret;
  65793. }
  65794. #endif
  65795. #ifdef __LITTLE_ENDIAN__
  65796. __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  65797. uint8x8_t __ret;
  65798. __ret = __p0 + vabd_u8(__p1, __p2);
  65799. return __ret;
  65800. }
  65801. #else
  65802. __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  65803. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65804. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65805. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  65806. uint8x8_t __ret;
  65807. __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
  65808. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65809. return __ret;
  65810. }
  65811. #endif
  65812. #ifdef __LITTLE_ENDIAN__
  65813. __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  65814. uint32x2_t __ret;
  65815. __ret = __p0 + vabd_u32(__p1, __p2);
  65816. return __ret;
  65817. }
  65818. #else
  65819. __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  65820. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65821. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65822. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  65823. uint32x2_t __ret;
  65824. __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
  65825. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65826. return __ret;
  65827. }
  65828. #endif
  65829. #ifdef __LITTLE_ENDIAN__
  65830. __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  65831. uint16x4_t __ret;
  65832. __ret = __p0 + vabd_u16(__p1, __p2);
  65833. return __ret;
  65834. }
  65835. #else
  65836. __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  65837. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65838. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65839. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  65840. uint16x4_t __ret;
  65841. __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
  65842. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65843. return __ret;
  65844. }
  65845. #endif
  65846. #ifdef __LITTLE_ENDIAN__
  65847. __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  65848. int8x8_t __ret;
  65849. __ret = __p0 + vabd_s8(__p1, __p2);
  65850. return __ret;
  65851. }
  65852. #else
  65853. __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  65854. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65855. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65856. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  65857. int8x8_t __ret;
  65858. __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
  65859. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65860. return __ret;
  65861. }
  65862. #endif
  65863. #ifdef __LITTLE_ENDIAN__
  65864. __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  65865. int32x2_t __ret;
  65866. __ret = __p0 + vabd_s32(__p1, __p2);
  65867. return __ret;
  65868. }
  65869. #else
  65870. __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  65871. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65872. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65873. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  65874. int32x2_t __ret;
  65875. __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
  65876. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65877. return __ret;
  65878. }
  65879. #endif
  65880. #ifdef __LITTLE_ENDIAN__
  65881. __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  65882. int16x4_t __ret;
  65883. __ret = __p0 + vabd_s16(__p1, __p2);
  65884. return __ret;
  65885. }
  65886. #else
  65887. __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  65888. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65889. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65890. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  65891. int16x4_t __ret;
  65892. __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
  65893. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65894. return __ret;
  65895. }
  65896. #endif
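/* vabdl: widening absolute difference. The narrow vabd result is lengthened with
   vmovl to the double-width element type. A __noswap_ form is also provided so
   other big-endian wrappers can reuse it without additional lane reversals. */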
  65897. #ifdef __LITTLE_ENDIAN__
  65898. __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  65899. uint16x8_t __ret;
  65900. __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
  65901. return __ret;
  65902. }
  65903. #else
  65904. __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  65905. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65906. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65907. uint16x8_t __ret;
  65908. __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
  65909. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65910. return __ret;
  65911. }
  65912. __ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  65913. uint16x8_t __ret;
  65914. __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
  65915. return __ret;
  65916. }
  65917. #endif
  65918. #ifdef __LITTLE_ENDIAN__
  65919. __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  65920. uint64x2_t __ret;
  65921. __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
  65922. return __ret;
  65923. }
  65924. #else
  65925. __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  65926. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65927. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65928. uint64x2_t __ret;
  65929. __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
  65930. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65931. return __ret;
  65932. }
  65933. __ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  65934. uint64x2_t __ret;
  65935. __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
  65936. return __ret;
  65937. }
  65938. #endif
  65939. #ifdef __LITTLE_ENDIAN__
  65940. __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  65941. uint32x4_t __ret;
  65942. __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
  65943. return __ret;
  65944. }
  65945. #else
  65946. __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  65947. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  65948. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  65949. uint32x4_t __ret;
  65950. __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
  65951. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  65952. return __ret;
  65953. }
  65954. __ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  65955. uint32x4_t __ret;
  65956. __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
  65957. return __ret;
  65958. }
  65959. #endif
  65960. #ifdef __LITTLE_ENDIAN__
  65961. __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
  65962. int16x8_t __ret;
  65963. __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
  65964. return __ret;
  65965. }
  65966. #else
  65967. __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
  65968. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  65969. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  65970. int16x8_t __ret;
  65971. __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
  65972. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  65973. return __ret;
  65974. }
  65975. __ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
  65976. int16x8_t __ret;
  65977. __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
  65978. return __ret;
  65979. }
  65980. #endif
  65981. #ifdef __LITTLE_ENDIAN__
  65982. __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
  65983. int64x2_t __ret;
  65984. __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
  65985. return __ret;
  65986. }
  65987. #else
  65988. __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
  65989. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  65990. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  65991. int64x2_t __ret;
  65992. __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
  65993. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  65994. return __ret;
  65995. }
  65996. __ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
  65997. int64x2_t __ret;
  65998. __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
  65999. return __ret;
  66000. }
  66001. #endif
  66002. #ifdef __LITTLE_ENDIAN__
  66003. __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
  66004. int32x4_t __ret;
  66005. __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
  66006. return __ret;
  66007. }
  66008. #else
  66009. __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
  66010. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66011. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66012. int32x4_t __ret;
  66013. __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
  66014. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66015. return __ret;
  66016. }
  66017. __ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
  66018. int32x4_t __ret;
  66019. __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
  66020. return __ret;
  66021. }
  66022. #endif
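/* vaddl: widening add. Both narrow operands are lengthened with vmovl and the
   addition is performed in the wider element type. */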
  66023. #ifdef __LITTLE_ENDIAN__
  66024. __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  66025. uint16x8_t __ret;
  66026. __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
  66027. return __ret;
  66028. }
  66029. #else
  66030. __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
  66031. uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  66032. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  66033. uint16x8_t __ret;
  66034. __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
  66035. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  66036. return __ret;
  66037. }
  66038. #endif
  66039. #ifdef __LITTLE_ENDIAN__
  66040. __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  66041. uint64x2_t __ret;
  66042. __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
  66043. return __ret;
  66044. }
  66045. #else
  66046. __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
  66047. uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66048. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66049. uint64x2_t __ret;
  66050. __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
  66051. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66052. return __ret;
  66053. }
  66054. #endif
  66055. #ifdef __LITTLE_ENDIAN__
  66056. __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  66057. uint32x4_t __ret;
  66058. __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
  66059. return __ret;
  66060. }
  66061. #else
  66062. __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
  66063. uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66064. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66065. uint32x4_t __ret;
  66066. __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
  66067. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66068. return __ret;
  66069. }
  66070. #endif
  66071. #ifdef __LITTLE_ENDIAN__
  66072. __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
  66073. int16x8_t __ret;
  66074. __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
  66075. return __ret;
  66076. }
  66077. #else
  66078. __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
  66079. int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  66080. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  66081. int16x8_t __ret;
  66082. __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
  66083. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  66084. return __ret;
  66085. }
  66086. #endif
  66087. #ifdef __LITTLE_ENDIAN__
  66088. __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
  66089. int64x2_t __ret;
  66090. __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
  66091. return __ret;
  66092. }
  66093. #else
  66094. __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
  66095. int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66096. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66097. int64x2_t __ret;
  66098. __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
  66099. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66100. return __ret;
  66101. }
  66102. #endif
  66103. #ifdef __LITTLE_ENDIAN__
  66104. __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
  66105. int32x4_t __ret;
  66106. __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
  66107. return __ret;
  66108. }
  66109. #else
  66110. __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
  66111. int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66112. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66113. int32x4_t __ret;
  66114. __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
  66115. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66116. return __ret;
  66117. }
  66118. #endif
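/* vaddw: wide add. The first operand is already wide; only the second operand is
   lengthened with vmovl before the addition. */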
  66119. #ifdef __LITTLE_ENDIAN__
  66120. __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
  66121. uint16x8_t __ret;
  66122. __ret = __p0 + vmovl_u8(__p1);
  66123. return __ret;
  66124. }
  66125. #else
  66126. __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
  66127. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  66128. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  66129. uint16x8_t __ret;
  66130. __ret = __rev0 + __noswap_vmovl_u8(__rev1);
  66131. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  66132. return __ret;
  66133. }
  66134. #endif
  66135. #ifdef __LITTLE_ENDIAN__
  66136. __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
  66137. uint64x2_t __ret;
  66138. __ret = __p0 + vmovl_u32(__p1);
  66139. return __ret;
  66140. }
  66141. #else
  66142. __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
  66143. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66144. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66145. uint64x2_t __ret;
  66146. __ret = __rev0 + __noswap_vmovl_u32(__rev1);
  66147. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66148. return __ret;
  66149. }
  66150. #endif
  66151. #ifdef __LITTLE_ENDIAN__
  66152. __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
  66153. uint32x4_t __ret;
  66154. __ret = __p0 + vmovl_u16(__p1);
  66155. return __ret;
  66156. }
  66157. #else
  66158. __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
  66159. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66160. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66161. uint32x4_t __ret;
  66162. __ret = __rev0 + __noswap_vmovl_u16(__rev1);
  66163. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66164. return __ret;
  66165. }
  66166. #endif
  66167. #ifdef __LITTLE_ENDIAN__
  66168. __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
  66169. int16x8_t __ret;
  66170. __ret = __p0 + vmovl_s8(__p1);
  66171. return __ret;
  66172. }
  66173. #else
  66174. __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
  66175. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  66176. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  66177. int16x8_t __ret;
  66178. __ret = __rev0 + __noswap_vmovl_s8(__rev1);
  66179. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  66180. return __ret;
  66181. }
  66182. #endif
  66183. #ifdef __LITTLE_ENDIAN__
  66184. __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
  66185. int64x2_t __ret;
  66186. __ret = __p0 + vmovl_s32(__p1);
  66187. return __ret;
  66188. }
  66189. #else
  66190. __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
  66191. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66192. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66193. int64x2_t __ret;
  66194. __ret = __rev0 + __noswap_vmovl_s32(__rev1);
  66195. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66196. return __ret;
  66197. }
  66198. #endif
  66199. #ifdef __LITTLE_ENDIAN__
  66200. __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
  66201. int32x4_t __ret;
  66202. __ret = __p0 + vmovl_s16(__p1);
  66203. return __ret;
  66204. }
  66205. #else
  66206. __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
  66207. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66208. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66209. int32x4_t __ret;
  66210. __ret = __rev0 + __noswap_vmovl_s16(__rev1);
  66211. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66212. return __ret;
  66213. }
  66214. #endif
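/* vget_lane_f16/vgetq_lane_f16: kept as macros so the lane index remains a
   constant expression. The float16 vector is reinterpreted as int16 lanes,
   extracted with vget_lane_s16/vgetq_lane_s16, and the bits are reinterpreted
   back to float16_t. */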
  66215. #ifdef __LITTLE_ENDIAN__
  66216. #define vget_lane_f16(__p0_254, __p1_254) __extension__ ({ \
  66217. float16x4_t __s0_254 = __p0_254; \
  66218. float16_t __ret_254; \
  66219. float16x4_t __reint_254 = __s0_254; \
  66220. int16_t __reint1_254 = vget_lane_s16(*(int16x4_t *) &__reint_254, __p1_254); \
  66221. __ret_254 = *(float16_t *) &__reint1_254; \
  66222. __ret_254; \
  66223. })
  66224. #else
  66225. #define vget_lane_f16(__p0_255, __p1_255) __extension__ ({ \
  66226. float16x4_t __s0_255 = __p0_255; \
  66227. float16x4_t __rev0_255; __rev0_255 = __builtin_shufflevector(__s0_255, __s0_255, 3, 2, 1, 0); \
  66228. float16_t __ret_255; \
  66229. float16x4_t __reint_255 = __rev0_255; \
  66230. int16_t __reint1_255 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_255, __p1_255); \
  66231. __ret_255 = *(float16_t *) &__reint1_255; \
  66232. __ret_255; \
  66233. })
  66234. #define __noswap_vget_lane_f16(__p0_256, __p1_256) __extension__ ({ \
  66235. float16x4_t __s0_256 = __p0_256; \
  66236. float16_t __ret_256; \
  66237. float16x4_t __reint_256 = __s0_256; \
  66238. int16_t __reint1_256 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_256, __p1_256); \
  66239. __ret_256 = *(float16_t *) &__reint1_256; \
  66240. __ret_256; \
  66241. })
  66242. #endif
  66243. #ifdef __LITTLE_ENDIAN__
  66244. #define vgetq_lane_f16(__p0_257, __p1_257) __extension__ ({ \
  66245. float16x8_t __s0_257 = __p0_257; \
  66246. float16_t __ret_257; \
  66247. float16x8_t __reint_257 = __s0_257; \
  66248. int16_t __reint1_257 = vgetq_lane_s16(*(int16x8_t *) &__reint_257, __p1_257); \
  66249. __ret_257 = *(float16_t *) &__reint1_257; \
  66250. __ret_257; \
  66251. })
  66252. #else
  66253. #define vgetq_lane_f16(__p0_258, __p1_258) __extension__ ({ \
  66254. float16x8_t __s0_258 = __p0_258; \
  66255. float16x8_t __rev0_258; __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 7, 6, 5, 4, 3, 2, 1, 0); \
  66256. float16_t __ret_258; \
  66257. float16x8_t __reint_258 = __rev0_258; \
  66258. int16_t __reint1_258 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_258, __p1_258); \
  66259. __ret_258 = *(float16_t *) &__reint1_258; \
  66260. __ret_258; \
  66261. })
  66262. #define __noswap_vgetq_lane_f16(__p0_259, __p1_259) __extension__ ({ \
  66263. float16x8_t __s0_259 = __p0_259; \
  66264. float16_t __ret_259; \
  66265. float16x8_t __reint_259 = __s0_259; \
  66266. int16_t __reint1_259 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_259, __p1_259); \
  66267. __ret_259 = *(float16_t *) &__reint1_259; \
  66268. __ret_259; \
  66269. })
  66270. #endif
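/* vmlal: widening multiply-accumulate, __p0 + vmull(__p1, __p2). The __noswap_
   forms operate on operands that are already in reversed lane order. */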
  66271. #ifdef __LITTLE_ENDIAN__
  66272. __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  66273. uint16x8_t __ret;
  66274. __ret = __p0 + vmull_u8(__p1, __p2);
  66275. return __ret;
  66276. }
  66277. #else
  66278. __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  66279. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  66280. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  66281. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  66282. uint16x8_t __ret;
  66283. __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
  66284. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  66285. return __ret;
  66286. }
  66287. __ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  66288. uint16x8_t __ret;
  66289. __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
  66290. return __ret;
  66291. }
  66292. #endif
  66293. #ifdef __LITTLE_ENDIAN__
  66294. __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  66295. uint64x2_t __ret;
  66296. __ret = __p0 + vmull_u32(__p1, __p2);
  66297. return __ret;
  66298. }
  66299. #else
  66300. __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  66301. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66302. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66303. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  66304. uint64x2_t __ret;
  66305. __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
  66306. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66307. return __ret;
  66308. }
  66309. __ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  66310. uint64x2_t __ret;
  66311. __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
  66312. return __ret;
  66313. }
  66314. #endif
  66315. #ifdef __LITTLE_ENDIAN__
  66316. __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  66317. uint32x4_t __ret;
  66318. __ret = __p0 + vmull_u16(__p1, __p2);
  66319. return __ret;
  66320. }
  66321. #else
  66322. __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  66323. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66324. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66325. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  66326. uint32x4_t __ret;
  66327. __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
  66328. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66329. return __ret;
  66330. }
  66331. __ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  66332. uint32x4_t __ret;
  66333. __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
  66334. return __ret;
  66335. }
  66336. #endif
  66337. #ifdef __LITTLE_ENDIAN__
  66338. __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  66339. int16x8_t __ret;
  66340. __ret = __p0 + vmull_s8(__p1, __p2);
  66341. return __ret;
  66342. }
  66343. #else
  66344. __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  66345. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  66346. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  66347. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  66348. int16x8_t __ret;
  66349. __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
  66350. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  66351. return __ret;
  66352. }
  66353. __ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  66354. int16x8_t __ret;
  66355. __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
  66356. return __ret;
  66357. }
  66358. #endif
  66359. #ifdef __LITTLE_ENDIAN__
  66360. __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  66361. int64x2_t __ret;
  66362. __ret = __p0 + vmull_s32(__p1, __p2);
  66363. return __ret;
  66364. }
  66365. #else
  66366. __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  66367. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66368. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66369. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  66370. int64x2_t __ret;
  66371. __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
  66372. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66373. return __ret;
  66374. }
  66375. __ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  66376. int64x2_t __ret;
  66377. __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
  66378. return __ret;
  66379. }
  66380. #endif
  66381. #ifdef __LITTLE_ENDIAN__
  66382. __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  66383. int32x4_t __ret;
  66384. __ret = __p0 + vmull_s16(__p1, __p2);
  66385. return __ret;
  66386. }
  66387. #else
  66388. __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  66389. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66390. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66391. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  66392. int32x4_t __ret;
  66393. __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
  66394. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66395. return __ret;
  66396. }
  66397. __ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  66398. int32x4_t __ret;
  66399. __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
  66400. return __ret;
  66401. }
  66402. #endif
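/* vmlal_lane: widening multiply-accumulate against a single lane of __p2, which
   is broadcast with __builtin_shufflevector. __p3 must be a constant lane index,
   hence the macro form. */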
  66403. #ifdef __LITTLE_ENDIAN__
  66404. #define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  66405. uint64x2_t __s0 = __p0; \
  66406. uint32x2_t __s1 = __p1; \
  66407. uint32x2_t __s2 = __p2; \
  66408. uint64x2_t __ret; \
  66409. __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  66410. __ret; \
  66411. })
  66412. #else
  66413. #define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  66414. uint64x2_t __s0 = __p0; \
  66415. uint32x2_t __s1 = __p1; \
  66416. uint32x2_t __s2 = __p2; \
  66417. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  66418. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  66419. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  66420. uint64x2_t __ret; \
  66421. __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  66422. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  66423. __ret; \
  66424. })
  66425. #endif
  66426. #ifdef __LITTLE_ENDIAN__
  66427. #define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  66428. uint32x4_t __s0 = __p0; \
  66429. uint16x4_t __s1 = __p1; \
  66430. uint16x4_t __s2 = __p2; \
  66431. uint32x4_t __ret; \
  66432. __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  66433. __ret; \
  66434. })
  66435. #else
  66436. #define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  66437. uint32x4_t __s0 = __p0; \
  66438. uint16x4_t __s1 = __p1; \
  66439. uint16x4_t __s2 = __p2; \
  66440. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  66441. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  66442. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  66443. uint32x4_t __ret; \
  66444. __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  66445. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  66446. __ret; \
  66447. })
  66448. #endif
  66449. #ifdef __LITTLE_ENDIAN__
  66450. #define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  66451. int64x2_t __s0 = __p0; \
  66452. int32x2_t __s1 = __p1; \
  66453. int32x2_t __s2 = __p2; \
  66454. int64x2_t __ret; \
  66455. __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  66456. __ret; \
  66457. })
  66458. #else
  66459. #define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  66460. int64x2_t __s0 = __p0; \
  66461. int32x2_t __s1 = __p1; \
  66462. int32x2_t __s2 = __p2; \
  66463. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  66464. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  66465. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  66466. int64x2_t __ret; \
  66467. __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  66468. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  66469. __ret; \
  66470. })
  66471. #endif
  66472. #ifdef __LITTLE_ENDIAN__
  66473. #define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  66474. int32x4_t __s0 = __p0; \
  66475. int16x4_t __s1 = __p1; \
  66476. int16x4_t __s2 = __p2; \
  66477. int32x4_t __ret; \
  66478. __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  66479. __ret; \
  66480. })
  66481. #else
  66482. #define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  66483. int32x4_t __s0 = __p0; \
  66484. int16x4_t __s1 = __p1; \
  66485. int16x4_t __s2 = __p2; \
  66486. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  66487. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  66488. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  66489. int32x4_t __ret; \
  66490. __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  66491. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  66492. __ret; \
  66493. })
  66494. #endif
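/* vmlal_n: widening multiply-accumulate against a scalar. __p2 is splatted into a
   vector initializer and passed to vmull before accumulating into __p0. */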
  66495. #ifdef __LITTLE_ENDIAN__
  66496. __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  66497. uint64x2_t __ret;
  66498. __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
  66499. return __ret;
  66500. }
  66501. #else
  66502. __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  66503. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66504. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66505. uint64x2_t __ret;
  66506. __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
  66507. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66508. return __ret;
  66509. }
  66510. __ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  66511. uint64x2_t __ret;
  66512. __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
  66513. return __ret;
  66514. }
  66515. #endif
  66516. #ifdef __LITTLE_ENDIAN__
  66517. __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  66518. uint32x4_t __ret;
  66519. __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
  66520. return __ret;
  66521. }
  66522. #else
  66523. __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  66524. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66525. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66526. uint32x4_t __ret;
  66527. __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
  66528. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66529. return __ret;
  66530. }
  66531. __ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  66532. uint32x4_t __ret;
  66533. __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
  66534. return __ret;
  66535. }
  66536. #endif
  66537. #ifdef __LITTLE_ENDIAN__
  66538. __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  66539. int64x2_t __ret;
  66540. __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
  66541. return __ret;
  66542. }
  66543. #else
  66544. __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  66545. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66546. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66547. int64x2_t __ret;
  66548. __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
  66549. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66550. return __ret;
  66551. }
  66552. __ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  66553. int64x2_t __ret;
  66554. __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
  66555. return __ret;
  66556. }
  66557. #endif
  66558. #ifdef __LITTLE_ENDIAN__
  66559. __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  66560. int32x4_t __ret;
  66561. __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
  66562. return __ret;
  66563. }
  66564. #else
  66565. __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  66566. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66567. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66568. int32x4_t __ret;
  66569. __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
  66570. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66571. return __ret;
  66572. }
  66573. __ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  66574. int32x4_t __ret;
  66575. __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
  66576. return __ret;
  66577. }
  66578. #endif
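/* vmlsl_u8, vmlsl_u32, vmlsl_u16, vmlsl_s8, vmlsl_s32, vmlsl_s16: widening
 * multiply-subtract. Each computes __p0 - vmull(__p1, __p2); the two narrow
 * operands are multiplied into double-width lanes and subtracted from the
 * accumulator. The big-endian variants reverse lane order on the way in and
 * out so the intrinsic behaves the same on either endianness.
 * Illustrative use (not part of this header; the variable names are just
 * examples):
 *   uint16x8_t acc = vdupq_n_u16(1000);
 *   uint8x8_t  a   = vdup_n_u8(3), b = vdup_n_u8(4);
 *   acc = vmlsl_u8(acc, a, b);   // every lane becomes 1000 - 3*4 = 988
 */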
  66579. #ifdef __LITTLE_ENDIAN__
  66580. __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  66581. uint16x8_t __ret;
  66582. __ret = __p0 - vmull_u8(__p1, __p2);
  66583. return __ret;
  66584. }
  66585. #else
  66586. __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  66587. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  66588. uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  66589. uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  66590. uint16x8_t __ret;
  66591. __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
  66592. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  66593. return __ret;
  66594. }
  66595. __ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
  66596. uint16x8_t __ret;
  66597. __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
  66598. return __ret;
  66599. }
  66600. #endif
  66601. #ifdef __LITTLE_ENDIAN__
  66602. __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  66603. uint64x2_t __ret;
  66604. __ret = __p0 - vmull_u32(__p1, __p2);
  66605. return __ret;
  66606. }
  66607. #else
  66608. __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  66609. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66610. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66611. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  66612. uint64x2_t __ret;
  66613. __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
  66614. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66615. return __ret;
  66616. }
  66617. __ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
  66618. uint64x2_t __ret;
  66619. __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
  66620. return __ret;
  66621. }
  66622. #endif
  66623. #ifdef __LITTLE_ENDIAN__
  66624. __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  66625. uint32x4_t __ret;
  66626. __ret = __p0 - vmull_u16(__p1, __p2);
  66627. return __ret;
  66628. }
  66629. #else
  66630. __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  66631. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66632. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66633. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  66634. uint32x4_t __ret;
  66635. __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
  66636. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66637. return __ret;
  66638. }
  66639. __ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
  66640. uint32x4_t __ret;
  66641. __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
  66642. return __ret;
  66643. }
  66644. #endif
  66645. #ifdef __LITTLE_ENDIAN__
  66646. __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  66647. int16x8_t __ret;
  66648. __ret = __p0 - vmull_s8(__p1, __p2);
  66649. return __ret;
  66650. }
  66651. #else
  66652. __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  66653. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  66654. int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  66655. int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  66656. int16x8_t __ret;
  66657. __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
  66658. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  66659. return __ret;
  66660. }
  66661. __ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
  66662. int16x8_t __ret;
  66663. __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
  66664. return __ret;
  66665. }
  66666. #endif
  66667. #ifdef __LITTLE_ENDIAN__
  66668. __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  66669. int64x2_t __ret;
  66670. __ret = __p0 - vmull_s32(__p1, __p2);
  66671. return __ret;
  66672. }
  66673. #else
  66674. __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  66675. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66676. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66677. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
  66678. int64x2_t __ret;
  66679. __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
  66680. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66681. return __ret;
  66682. }
  66683. __ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
  66684. int64x2_t __ret;
  66685. __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
  66686. return __ret;
  66687. }
  66688. #endif
  66689. #ifdef __LITTLE_ENDIAN__
  66690. __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  66691. int32x4_t __ret;
  66692. __ret = __p0 - vmull_s16(__p1, __p2);
  66693. return __ret;
  66694. }
  66695. #else
  66696. __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  66697. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66698. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66699. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  66700. int32x4_t __ret;
  66701. __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
  66702. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66703. return __ret;
  66704. }
  66705. __ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
  66706. int32x4_t __ret;
  66707. __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
  66708. return __ret;
  66709. }
  66710. #endif
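/* vmlsl_lane_u32, vmlsl_lane_u16, vmlsl_lane_s32, vmlsl_lane_s16: widening
 * multiply-subtract by a single lane of the third argument, selected by the
 * constant index __p3. These are macros so that the lane index reaches
 * __builtin_shufflevector (which splats the chosen lane) as a compile-time
 * constant.
 */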
  66711. #ifdef __LITTLE_ENDIAN__
  66712. #define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  66713. uint64x2_t __s0 = __p0; \
  66714. uint32x2_t __s1 = __p1; \
  66715. uint32x2_t __s2 = __p2; \
  66716. uint64x2_t __ret; \
  66717. __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  66718. __ret; \
  66719. })
  66720. #else
  66721. #define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
  66722. uint64x2_t __s0 = __p0; \
  66723. uint32x2_t __s1 = __p1; \
  66724. uint32x2_t __s2 = __p2; \
  66725. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  66726. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  66727. uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  66728. uint64x2_t __ret; \
  66729. __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  66730. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  66731. __ret; \
  66732. })
  66733. #endif
  66734. #ifdef __LITTLE_ENDIAN__
  66735. #define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  66736. uint32x4_t __s0 = __p0; \
  66737. uint16x4_t __s1 = __p1; \
  66738. uint16x4_t __s2 = __p2; \
  66739. uint32x4_t __ret; \
  66740. __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  66741. __ret; \
  66742. })
  66743. #else
  66744. #define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
  66745. uint32x4_t __s0 = __p0; \
  66746. uint16x4_t __s1 = __p1; \
  66747. uint16x4_t __s2 = __p2; \
  66748. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  66749. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  66750. uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  66751. uint32x4_t __ret; \
  66752. __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  66753. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  66754. __ret; \
  66755. })
  66756. #endif
  66757. #ifdef __LITTLE_ENDIAN__
  66758. #define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  66759. int64x2_t __s0 = __p0; \
  66760. int32x2_t __s1 = __p1; \
  66761. int32x2_t __s2 = __p2; \
  66762. int64x2_t __ret; \
  66763. __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
  66764. __ret; \
  66765. })
  66766. #else
  66767. #define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
  66768. int64x2_t __s0 = __p0; \
  66769. int32x2_t __s1 = __p1; \
  66770. int32x2_t __s2 = __p2; \
  66771. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
  66772. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
  66773. int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
  66774. int64x2_t __ret; \
  66775. __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
  66776. __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
  66777. __ret; \
  66778. })
  66779. #endif
  66780. #ifdef __LITTLE_ENDIAN__
  66781. #define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  66782. int32x4_t __s0 = __p0; \
  66783. int16x4_t __s1 = __p1; \
  66784. int16x4_t __s2 = __p2; \
  66785. int32x4_t __ret; \
  66786. __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
  66787. __ret; \
  66788. })
  66789. #else
  66790. #define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
  66791. int32x4_t __s0 = __p0; \
  66792. int16x4_t __s1 = __p1; \
  66793. int16x4_t __s2 = __p2; \
  66794. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
  66795. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
  66796. int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
  66797. int32x4_t __ret; \
  66798. __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
  66799. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
  66800. __ret; \
  66801. })
  66802. #endif
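/* vmlsl_n_u32, vmlsl_n_u16, vmlsl_n_s32, vmlsl_n_s16: widening
 * multiply-subtract by a scalar. The scalar __p2 is broadcast into a vector
 * literal, so the operation reduces to __p0 - vmull(__p1, dup(__p2)).
 */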
  66803. #ifdef __LITTLE_ENDIAN__
  66804. __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  66805. uint64x2_t __ret;
  66806. __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
  66807. return __ret;
  66808. }
  66809. #else
  66810. __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  66811. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66812. uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66813. uint64x2_t __ret;
  66814. __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
  66815. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66816. return __ret;
  66817. }
  66818. __ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
  66819. uint64x2_t __ret;
  66820. __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
  66821. return __ret;
  66822. }
  66823. #endif
  66824. #ifdef __LITTLE_ENDIAN__
  66825. __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  66826. uint32x4_t __ret;
  66827. __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
  66828. return __ret;
  66829. }
  66830. #else
  66831. __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  66832. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66833. uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66834. uint32x4_t __ret;
  66835. __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
  66836. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66837. return __ret;
  66838. }
  66839. __ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
  66840. uint32x4_t __ret;
  66841. __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
  66842. return __ret;
  66843. }
  66844. #endif
  66845. #ifdef __LITTLE_ENDIAN__
  66846. __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  66847. int64x2_t __ret;
  66848. __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
  66849. return __ret;
  66850. }
  66851. #else
  66852. __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  66853. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  66854. int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
  66855. int64x2_t __ret;
  66856. __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
  66857. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  66858. return __ret;
  66859. }
  66860. __ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
  66861. int64x2_t __ret;
  66862. __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
  66863. return __ret;
  66864. }
  66865. #endif
  66866. #ifdef __LITTLE_ENDIAN__
  66867. __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  66868. int32x4_t __ret;
  66869. __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
  66870. return __ret;
  66871. }
  66872. #else
  66873. __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  66874. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  66875. int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  66876. int32x4_t __ret;
  66877. __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
  66878. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  66879. return __ret;
  66880. }
  66881. __ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
  66882. int32x4_t __ret;
  66883. __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
  66884. return __ret;
  66885. }
  66886. #endif
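/* vset_lane_f16 / vsetq_lane_f16: insert a float16_t into one lane of a
 * float16x4_t / float16x8_t. The value and the vector are reinterpreted
 * bit-for-bit as int16 through pointer casts, routed through
 * vset_lane_s16 / vsetq_lane_s16, and the result is reinterpreted back to
 * float16.
 */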
  66887. #ifdef __LITTLE_ENDIAN__
  66888. #define vset_lane_f16(__p0_260, __p1_260, __p2_260) __extension__ ({ \
  66889. float16_t __s0_260 = __p0_260; \
  66890. float16x4_t __s1_260 = __p1_260; \
  66891. float16x4_t __ret_260; \
  66892. float16_t __reint_260 = __s0_260; \
  66893. float16x4_t __reint1_260 = __s1_260; \
  66894. int16x4_t __reint2_260 = vset_lane_s16(*(int16_t *) &__reint_260, *(int16x4_t *) &__reint1_260, __p2_260); \
  66895. __ret_260 = *(float16x4_t *) &__reint2_260; \
  66896. __ret_260; \
  66897. })
  66898. #else
  66899. #define vset_lane_f16(__p0_261, __p1_261, __p2_261) __extension__ ({ \
  66900. float16_t __s0_261 = __p0_261; \
  66901. float16x4_t __s1_261 = __p1_261; \
  66902. float16x4_t __rev1_261; __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 3, 2, 1, 0); \
  66903. float16x4_t __ret_261; \
  66904. float16_t __reint_261 = __s0_261; \
  66905. float16x4_t __reint1_261 = __rev1_261; \
  66906. int16x4_t __reint2_261 = __noswap_vset_lane_s16(*(int16_t *) &__reint_261, *(int16x4_t *) &__reint1_261, __p2_261); \
  66907. __ret_261 = *(float16x4_t *) &__reint2_261; \
  66908. __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 3, 2, 1, 0); \
  66909. __ret_261; \
  66910. })
  66911. #endif
  66912. #ifdef __LITTLE_ENDIAN__
  66913. #define vsetq_lane_f16(__p0_262, __p1_262, __p2_262) __extension__ ({ \
  66914. float16_t __s0_262 = __p0_262; \
  66915. float16x8_t __s1_262 = __p1_262; \
  66916. float16x8_t __ret_262; \
  66917. float16_t __reint_262 = __s0_262; \
  66918. float16x8_t __reint1_262 = __s1_262; \
  66919. int16x8_t __reint2_262 = vsetq_lane_s16(*(int16_t *) &__reint_262, *(int16x8_t *) &__reint1_262, __p2_262); \
  66920. __ret_262 = *(float16x8_t *) &__reint2_262; \
  66921. __ret_262; \
  66922. })
  66923. #else
  66924. #define vsetq_lane_f16(__p0_263, __p1_263, __p2_263) __extension__ ({ \
  66925. float16_t __s0_263 = __p0_263; \
  66926. float16x8_t __s1_263 = __p1_263; \
  66927. float16x8_t __rev1_263; __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 7, 6, 5, 4, 3, 2, 1, 0); \
  66928. float16x8_t __ret_263; \
  66929. float16_t __reint_263 = __s0_263; \
  66930. float16x8_t __reint1_263 = __rev1_263; \
  66931. int16x8_t __reint2_263 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_263, *(int16x8_t *) &__reint1_263, __p2_263); \
  66932. __ret_263 = *(float16x8_t *) &__reint2_263; \
  66933. __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 7, 6, 5, 4, 3, 2, 1, 0); \
  66934. __ret_263; \
  66935. })
  66936. #endif
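/* The next block is guarded by __ARM_FEATURE_FP16_VECTOR_ARITHMETIC and
 * __aarch64__ (AArch64 with the ARMv8.2-A FP16 extension). vmulh_lane_f16
 * and vmulh_laneq_f16 multiply a scalar float16_t by one lane of a 64-bit
 * or 128-bit float16 vector, respectively.
 */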
  66937. #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
  66938. #ifdef __LITTLE_ENDIAN__
  66939. #define vmulh_lane_f16(__p0_264, __p1_264, __p2_264) __extension__ ({ \
  66940. float16_t __s0_264 = __p0_264; \
  66941. float16x4_t __s1_264 = __p1_264; \
  66942. float16_t __ret_264; \
  66943. __ret_264 = __s0_264 * vget_lane_f16(__s1_264, __p2_264); \
  66944. __ret_264; \
  66945. })
  66946. #else
  66947. #define vmulh_lane_f16(__p0_265, __p1_265, __p2_265) __extension__ ({ \
  66948. float16_t __s0_265 = __p0_265; \
  66949. float16x4_t __s1_265 = __p1_265; \
  66950. float16x4_t __rev1_265; __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \
  66951. float16_t __ret_265; \
  66952. __ret_265 = __s0_265 * __noswap_vget_lane_f16(__rev1_265, __p2_265); \
  66953. __ret_265; \
  66954. })
  66955. #endif
  66956. #ifdef __LITTLE_ENDIAN__
  66957. #define vmulh_laneq_f16(__p0_266, __p1_266, __p2_266) __extension__ ({ \
  66958. float16_t __s0_266 = __p0_266; \
  66959. float16x8_t __s1_266 = __p1_266; \
  66960. float16_t __ret_266; \
  66961. __ret_266 = __s0_266 * vgetq_lane_f16(__s1_266, __p2_266); \
  66962. __ret_266; \
  66963. })
  66964. #else
  66965. #define vmulh_laneq_f16(__p0_267, __p1_267, __p2_267) __extension__ ({ \
  66966. float16_t __s0_267 = __p0_267; \
  66967. float16x8_t __s1_267 = __p1_267; \
  66968. float16x8_t __rev1_267; __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \
  66969. float16_t __ret_267; \
  66970. __ret_267 = __s0_267 * __noswap_vgetq_lane_f16(__rev1_267, __p2_267); \
  66971. __ret_267; \
  66972. })
  66973. #endif
  66974. #endif
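/* Scalar saturating rounding doubling multiply accumulate/subtract,
 * guarded by __ARM_FEATURE_QRDMX and __aarch64__ (the ARMv8.1-A rounding
 * doubling multiply extension): vqrdmlahs_s32, vqrdmlahh_s16, vqrdmlshs_s32,
 * vqrdmlshh_s16 and their _lane/_laneq forms. Each is composed from
 * vqrdmulh on the scalar operands followed by a saturating add (vqadd) or
 * subtract (vqsub) of the accumulator.
 */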
  66975. #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
  66976. #ifdef __LITTLE_ENDIAN__
  66977. __ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
  66978. int32_t __ret;
  66979. __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
  66980. return __ret;
  66981. }
  66982. #else
  66983. __ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
  66984. int32_t __ret;
  66985. __ret = __noswap_vqadds_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2));
  66986. return __ret;
  66987. }
  66988. #endif
  66989. #ifdef __LITTLE_ENDIAN__
  66990. __ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
  66991. int16_t __ret;
  66992. __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
  66993. return __ret;
  66994. }
  66995. #else
  66996. __ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
  66997. int16_t __ret;
  66998. __ret = __noswap_vqaddh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2));
  66999. return __ret;
  67000. }
  67001. #endif
  67002. #ifdef __LITTLE_ENDIAN__
  67003. #define vqrdmlahs_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
  67004. int32_t __s0_268 = __p0_268; \
  67005. int32_t __s1_268 = __p1_268; \
  67006. int32x2_t __s2_268 = __p2_268; \
  67007. int32_t __ret_268; \
  67008. __ret_268 = vqadds_s32(__s0_268, vqrdmulhs_s32(__s1_268, vget_lane_s32(__s2_268, __p3_268))); \
  67009. __ret_268; \
  67010. })
  67011. #else
  67012. #define vqrdmlahs_lane_s32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
  67013. int32_t __s0_269 = __p0_269; \
  67014. int32_t __s1_269 = __p1_269; \
  67015. int32x2_t __s2_269 = __p2_269; \
  67016. int32x2_t __rev2_269; __rev2_269 = __builtin_shufflevector(__s2_269, __s2_269, 1, 0); \
  67017. int32_t __ret_269; \
  67018. __ret_269 = __noswap_vqadds_s32(__s0_269, __noswap_vqrdmulhs_s32(__s1_269, __noswap_vget_lane_s32(__rev2_269, __p3_269))); \
  67019. __ret_269; \
  67020. })
  67021. #endif
  67022. #ifdef __LITTLE_ENDIAN__
  67023. #define vqrdmlahh_lane_s16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
  67024. int16_t __s0_270 = __p0_270; \
  67025. int16_t __s1_270 = __p1_270; \
  67026. int16x4_t __s2_270 = __p2_270; \
  67027. int16_t __ret_270; \
  67028. __ret_270 = vqaddh_s16(__s0_270, vqrdmulhh_s16(__s1_270, vget_lane_s16(__s2_270, __p3_270))); \
  67029. __ret_270; \
  67030. })
  67031. #else
  67032. #define vqrdmlahh_lane_s16(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
  67033. int16_t __s0_271 = __p0_271; \
  67034. int16_t __s1_271 = __p1_271; \
  67035. int16x4_t __s2_271 = __p2_271; \
  67036. int16x4_t __rev2_271; __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 3, 2, 1, 0); \
  67037. int16_t __ret_271; \
  67038. __ret_271 = __noswap_vqaddh_s16(__s0_271, __noswap_vqrdmulhh_s16(__s1_271, __noswap_vget_lane_s16(__rev2_271, __p3_271))); \
  67039. __ret_271; \
  67040. })
  67041. #endif
  67042. #ifdef __LITTLE_ENDIAN__
  67043. #define vqrdmlahs_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
  67044. int32_t __s0_272 = __p0_272; \
  67045. int32_t __s1_272 = __p1_272; \
  67046. int32x4_t __s2_272 = __p2_272; \
  67047. int32_t __ret_272; \
  67048. __ret_272 = vqadds_s32(__s0_272, vqrdmulhs_s32(__s1_272, vgetq_lane_s32(__s2_272, __p3_272))); \
  67049. __ret_272; \
  67050. })
  67051. #else
  67052. #define vqrdmlahs_laneq_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
  67053. int32_t __s0_273 = __p0_273; \
  67054. int32_t __s1_273 = __p1_273; \
  67055. int32x4_t __s2_273 = __p2_273; \
  67056. int32x4_t __rev2_273; __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 3, 2, 1, 0); \
  67057. int32_t __ret_273; \
  67058. __ret_273 = __noswap_vqadds_s32(__s0_273, __noswap_vqrdmulhs_s32(__s1_273, __noswap_vgetq_lane_s32(__rev2_273, __p3_273))); \
  67059. __ret_273; \
  67060. })
  67061. #endif
  67062. #ifdef __LITTLE_ENDIAN__
  67063. #define vqrdmlahh_laneq_s16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
  67064. int16_t __s0_274 = __p0_274; \
  67065. int16_t __s1_274 = __p1_274; \
  67066. int16x8_t __s2_274 = __p2_274; \
  67067. int16_t __ret_274; \
  67068. __ret_274 = vqaddh_s16(__s0_274, vqrdmulhh_s16(__s1_274, vgetq_lane_s16(__s2_274, __p3_274))); \
  67069. __ret_274; \
  67070. })
  67071. #else
  67072. #define vqrdmlahh_laneq_s16(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
  67073. int16_t __s0_275 = __p0_275; \
  67074. int16_t __s1_275 = __p1_275; \
  67075. int16x8_t __s2_275 = __p2_275; \
  67076. int16x8_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 7, 6, 5, 4, 3, 2, 1, 0); \
  67077. int16_t __ret_275; \
  67078. __ret_275 = __noswap_vqaddh_s16(__s0_275, __noswap_vqrdmulhh_s16(__s1_275, __noswap_vgetq_lane_s16(__rev2_275, __p3_275))); \
  67079. __ret_275; \
  67080. })
  67081. #endif
  67082. #ifdef __LITTLE_ENDIAN__
  67083. __ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
  67084. int32_t __ret;
  67085. __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
  67086. return __ret;
  67087. }
  67088. #else
  67089. __ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
  67090. int32_t __ret;
  67091. __ret = __noswap_vqsubs_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2));
  67092. return __ret;
  67093. }
  67094. #endif
  67095. #ifdef __LITTLE_ENDIAN__
  67096. __ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
  67097. int16_t __ret;
  67098. __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
  67099. return __ret;
  67100. }
  67101. #else
  67102. __ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
  67103. int16_t __ret;
  67104. __ret = __noswap_vqsubh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2));
  67105. return __ret;
  67106. }
  67107. #endif
  67108. #ifdef __LITTLE_ENDIAN__
  67109. #define vqrdmlshs_lane_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
  67110. int32_t __s0_276 = __p0_276; \
  67111. int32_t __s1_276 = __p1_276; \
  67112. int32x2_t __s2_276 = __p2_276; \
  67113. int32_t __ret_276; \
  67114. __ret_276 = vqsubs_s32(__s0_276, vqrdmulhs_s32(__s1_276, vget_lane_s32(__s2_276, __p3_276))); \
  67115. __ret_276; \
  67116. })
  67117. #else
  67118. #define vqrdmlshs_lane_s32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
  67119. int32_t __s0_277 = __p0_277; \
  67120. int32_t __s1_277 = __p1_277; \
  67121. int32x2_t __s2_277 = __p2_277; \
  67122. int32x2_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 1, 0); \
  67123. int32_t __ret_277; \
  67124. __ret_277 = __noswap_vqsubs_s32(__s0_277, __noswap_vqrdmulhs_s32(__s1_277, __noswap_vget_lane_s32(__rev2_277, __p3_277))); \
  67125. __ret_277; \
  67126. })
  67127. #endif
  67128. #ifdef __LITTLE_ENDIAN__
  67129. #define vqrdmlshh_lane_s16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
  67130. int16_t __s0_278 = __p0_278; \
  67131. int16_t __s1_278 = __p1_278; \
  67132. int16x4_t __s2_278 = __p2_278; \
  67133. int16_t __ret_278; \
  67134. __ret_278 = vqsubh_s16(__s0_278, vqrdmulhh_s16(__s1_278, vget_lane_s16(__s2_278, __p3_278))); \
  67135. __ret_278; \
  67136. })
  67137. #else
  67138. #define vqrdmlshh_lane_s16(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
  67139. int16_t __s0_279 = __p0_279; \
  67140. int16_t __s1_279 = __p1_279; \
  67141. int16x4_t __s2_279 = __p2_279; \
  67142. int16x4_t __rev2_279; __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 3, 2, 1, 0); \
  67143. int16_t __ret_279; \
  67144. __ret_279 = __noswap_vqsubh_s16(__s0_279, __noswap_vqrdmulhh_s16(__s1_279, __noswap_vget_lane_s16(__rev2_279, __p3_279))); \
  67145. __ret_279; \
  67146. })
  67147. #endif
  67148. #ifdef __LITTLE_ENDIAN__
  67149. #define vqrdmlshs_laneq_s32(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
  67150. int32_t __s0_280 = __p0_280; \
  67151. int32_t __s1_280 = __p1_280; \
  67152. int32x4_t __s2_280 = __p2_280; \
  67153. int32_t __ret_280; \
  67154. __ret_280 = vqsubs_s32(__s0_280, vqrdmulhs_s32(__s1_280, vgetq_lane_s32(__s2_280, __p3_280))); \
  67155. __ret_280; \
  67156. })
  67157. #else
  67158. #define vqrdmlshs_laneq_s32(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
  67159. int32_t __s0_281 = __p0_281; \
  67160. int32_t __s1_281 = __p1_281; \
  67161. int32x4_t __s2_281 = __p2_281; \
  67162. int32x4_t __rev2_281; __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
  67163. int32_t __ret_281; \
  67164. __ret_281 = __noswap_vqsubs_s32(__s0_281, __noswap_vqrdmulhs_s32(__s1_281, __noswap_vgetq_lane_s32(__rev2_281, __p3_281))); \
  67165. __ret_281; \
  67166. })
  67167. #endif
  67168. #ifdef __LITTLE_ENDIAN__
  67169. #define vqrdmlshh_laneq_s16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
  67170. int16_t __s0_282 = __p0_282; \
  67171. int16_t __s1_282 = __p1_282; \
  67172. int16x8_t __s2_282 = __p2_282; \
  67173. int16_t __ret_282; \
  67174. __ret_282 = vqsubh_s16(__s0_282, vqrdmulhh_s16(__s1_282, vgetq_lane_s16(__s2_282, __p3_282))); \
  67175. __ret_282; \
  67176. })
  67177. #else
  67178. #define vqrdmlshh_laneq_s16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
  67179. int16_t __s0_283 = __p0_283; \
  67180. int16_t __s1_283 = __p1_283; \
  67181. int16x8_t __s2_283 = __p2_283; \
  67182. int16x8_t __rev2_283; __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
  67183. int16_t __ret_283; \
  67184. __ret_283 = __noswap_vqsubh_s16(__s0_283, __noswap_vqrdmulhh_s16(__s1_283, __noswap_vgetq_lane_s16(__rev2_283, __p3_283))); \
  67185. __ret_283; \
  67186. })
  67187. #endif
  67188. #endif
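/* The block that starts here is compiled only for AArch64 (__aarch64__).
 * Its *_high intrinsics operate on the upper half of 128-bit inputs, e.g.
 * vabdl_high_u8(a, b) is vabdl_u8(vget_high_u8(a), vget_high_u8(b)):
 * a widening absolute difference of the top eight bytes.
 */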
  67189. #if defined(__aarch64__)
  67190. #ifdef __LITTLE_ENDIAN__
  67191. __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  67192. uint16x8_t __ret;
  67193. __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
  67194. return __ret;
  67195. }
  67196. #else
  67197. __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  67198. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67199. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67200. uint16x8_t __ret;
  67201. __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
  67202. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  67203. return __ret;
  67204. }
  67205. #endif
  67206. #ifdef __LITTLE_ENDIAN__
  67207. __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  67208. uint64x2_t __ret;
  67209. __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
  67210. return __ret;
  67211. }
  67212. #else
  67213. __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  67214. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67215. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67216. uint64x2_t __ret;
  67217. __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
  67218. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67219. return __ret;
  67220. }
  67221. #endif
  67222. #ifdef __LITTLE_ENDIAN__
  67223. __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  67224. uint32x4_t __ret;
  67225. __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
  67226. return __ret;
  67227. }
  67228. #else
  67229. __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  67230. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  67231. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67232. uint32x4_t __ret;
  67233. __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
  67234. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67235. return __ret;
  67236. }
  67237. #endif
  67238. #ifdef __LITTLE_ENDIAN__
  67239. __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
  67240. int16x8_t __ret;
  67241. __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
  67242. return __ret;
  67243. }
  67244. #else
  67245. __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
  67246. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67247. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67248. int16x8_t __ret;
  67249. __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
  67250. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  67251. return __ret;
  67252. }
  67253. #endif
  67254. #ifdef __LITTLE_ENDIAN__
  67255. __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
  67256. int64x2_t __ret;
  67257. __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
  67258. return __ret;
  67259. }
  67260. #else
  67261. __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
  67262. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67263. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67264. int64x2_t __ret;
  67265. __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
  67266. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67267. return __ret;
  67268. }
  67269. #endif
  67270. #ifdef __LITTLE_ENDIAN__
  67271. __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
  67272. int32x4_t __ret;
  67273. __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
  67274. return __ret;
  67275. }
  67276. #else
  67277. __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
  67278. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  67279. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67280. int32x4_t __ret;
  67281. __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
  67282. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67283. return __ret;
  67284. }
  67285. #endif
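/* vaddl_high_u8/u32/u16/s8/s32/s16: widening add of the upper halves of two
 * 128-bit vectors, expressed as vmovl_high(__p0) + vmovl_high(__p1).
 */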
  67286. #ifdef __LITTLE_ENDIAN__
  67287. __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  67288. uint16x8_t __ret;
  67289. __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
  67290. return __ret;
  67291. }
  67292. #else
  67293. __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
  67294. uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67295. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67296. uint16x8_t __ret;
  67297. __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
  67298. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  67299. return __ret;
  67300. }
  67301. #endif
  67302. #ifdef __LITTLE_ENDIAN__
  67303. __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  67304. uint64x2_t __ret;
  67305. __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
  67306. return __ret;
  67307. }
  67308. #else
  67309. __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
  67310. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67311. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67312. uint64x2_t __ret;
  67313. __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
  67314. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67315. return __ret;
  67316. }
  67317. #endif
  67318. #ifdef __LITTLE_ENDIAN__
  67319. __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  67320. uint32x4_t __ret;
  67321. __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
  67322. return __ret;
  67323. }
  67324. #else
  67325. __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
  67326. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  67327. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67328. uint32x4_t __ret;
  67329. __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
  67330. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67331. return __ret;
  67332. }
  67333. #endif
  67334. #ifdef __LITTLE_ENDIAN__
  67335. __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
  67336. int16x8_t __ret;
  67337. __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
  67338. return __ret;
  67339. }
  67340. #else
  67341. __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
  67342. int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67343. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67344. int16x8_t __ret;
  67345. __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
  67346. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  67347. return __ret;
  67348. }
  67349. #endif
  67350. #ifdef __LITTLE_ENDIAN__
  67351. __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
  67352. int64x2_t __ret;
  67353. __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
  67354. return __ret;
  67355. }
  67356. #else
  67357. __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
  67358. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67359. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67360. int64x2_t __ret;
  67361. __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
  67362. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67363. return __ret;
  67364. }
  67365. #endif
  67366. #ifdef __LITTLE_ENDIAN__
  67367. __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
  67368. int32x4_t __ret;
  67369. __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
  67370. return __ret;
  67371. }
  67372. #else
  67373. __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
  67374. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  67375. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67376. int32x4_t __ret;
  67377. __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
  67378. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67379. return __ret;
  67380. }
  67381. #endif
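/* vaddw_high_u8/u32/u16/s8/s32/s16: widening add of a wide accumulator and
 * the upper half of a narrow 128-bit vector: __p0 + vmovl_high(__p1).
 */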
  67382. #ifdef __LITTLE_ENDIAN__
  67383. __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
  67384. uint16x8_t __ret;
  67385. __ret = __p0 + vmovl_high_u8(__p1);
  67386. return __ret;
  67387. }
  67388. #else
  67389. __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
  67390. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  67391. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67392. uint16x8_t __ret;
  67393. __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
  67394. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  67395. return __ret;
  67396. }
  67397. #endif
  67398. #ifdef __LITTLE_ENDIAN__
  67399. __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
  67400. uint64x2_t __ret;
  67401. __ret = __p0 + vmovl_high_u32(__p1);
  67402. return __ret;
  67403. }
  67404. #else
  67405. __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
  67406. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  67407. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67408. uint64x2_t __ret;
  67409. __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
  67410. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67411. return __ret;
  67412. }
  67413. #endif
  67414. #ifdef __LITTLE_ENDIAN__
  67415. __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
  67416. uint32x4_t __ret;
  67417. __ret = __p0 + vmovl_high_u16(__p1);
  67418. return __ret;
  67419. }
  67420. #else
  67421. __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
  67422. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67423. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67424. uint32x4_t __ret;
  67425. __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
  67426. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67427. return __ret;
  67428. }
  67429. #endif
  67430. #ifdef __LITTLE_ENDIAN__
  67431. __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
  67432. int16x8_t __ret;
  67433. __ret = __p0 + vmovl_high_s8(__p1);
  67434. return __ret;
  67435. }
  67436. #else
  67437. __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
  67438. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  67439. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67440. int16x8_t __ret;
  67441. __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
  67442. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  67443. return __ret;
  67444. }
  67445. #endif
  67446. #ifdef __LITTLE_ENDIAN__
  67447. __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
  67448. int64x2_t __ret;
  67449. __ret = __p0 + vmovl_high_s32(__p1);
  67450. return __ret;
  67451. }
  67452. #else
  67453. __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
  67454. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  67455. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67456. int64x2_t __ret;
  67457. __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
  67458. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67459. return __ret;
  67460. }
  67461. #endif
  67462. #ifdef __LITTLE_ENDIAN__
  67463. __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
  67464. int32x4_t __ret;
  67465. __ret = __p0 + vmovl_high_s16(__p1);
  67466. return __ret;
  67467. }
  67468. #else
  67469. __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
  67470. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67471. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67472. int32x4_t __ret;
  67473. __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
  67474. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67475. return __ret;
  67476. }
  67477. #endif
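/* vcopyq_lane, vcopy_lane, vcopyq_laneq and vcopy_laneq for poly64 and
 * float64: copy lane __p3 of the second vector into lane __p1 of the first,
 * implemented as a get-lane followed by a set-lane. They are macros so both
 * lane indices stay compile-time constants.
 */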
  67478. #ifdef __LITTLE_ENDIAN__
  67479. #define vcopyq_lane_p64(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
  67480. poly64x2_t __s0_284 = __p0_284; \
  67481. poly64x1_t __s2_284 = __p2_284; \
  67482. poly64x2_t __ret_284; \
  67483. __ret_284 = vsetq_lane_p64(vget_lane_p64(__s2_284, __p3_284), __s0_284, __p1_284); \
  67484. __ret_284; \
  67485. })
  67486. #else
  67487. #define vcopyq_lane_p64(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
  67488. poly64x2_t __s0_285 = __p0_285; \
  67489. poly64x1_t __s2_285 = __p2_285; \
  67490. poly64x2_t __rev0_285; __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
  67491. poly64x2_t __ret_285; \
  67492. __ret_285 = __noswap_vsetq_lane_p64(__noswap_vget_lane_p64(__s2_285, __p3_285), __rev0_285, __p1_285); \
  67493. __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
  67494. __ret_285; \
  67495. })
  67496. #endif
  67497. #ifdef __LITTLE_ENDIAN__
  67498. #define vcopyq_lane_f64(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
  67499. float64x2_t __s0_286 = __p0_286; \
  67500. float64x1_t __s2_286 = __p2_286; \
  67501. float64x2_t __ret_286; \
  67502. __ret_286 = vsetq_lane_f64(vget_lane_f64(__s2_286, __p3_286), __s0_286, __p1_286); \
  67503. __ret_286; \
  67504. })
  67505. #else
  67506. #define vcopyq_lane_f64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
  67507. float64x2_t __s0_287 = __p0_287; \
  67508. float64x1_t __s2_287 = __p2_287; \
  67509. float64x2_t __rev0_287; __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \
  67510. float64x2_t __ret_287; \
  67511. __ret_287 = __noswap_vsetq_lane_f64(__noswap_vget_lane_f64(__s2_287, __p3_287), __rev0_287, __p1_287); \
  67512. __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \
  67513. __ret_287; \
  67514. })
  67515. #endif
  67516. #ifdef __LITTLE_ENDIAN__
  67517. #define vcopy_lane_p64(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
  67518. poly64x1_t __s0_288 = __p0_288; \
  67519. poly64x1_t __s2_288 = __p2_288; \
  67520. poly64x1_t __ret_288; \
  67521. __ret_288 = vset_lane_p64(vget_lane_p64(__s2_288, __p3_288), __s0_288, __p1_288); \
  67522. __ret_288; \
  67523. })
  67524. #else
  67525. #define vcopy_lane_p64(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
  67526. poly64x1_t __s0_289 = __p0_289; \
  67527. poly64x1_t __s2_289 = __p2_289; \
  67528. poly64x1_t __ret_289; \
  67529. __ret_289 = __noswap_vset_lane_p64(__noswap_vget_lane_p64(__s2_289, __p3_289), __s0_289, __p1_289); \
  67530. __ret_289; \
  67531. })
  67532. #endif
  67533. #ifdef __LITTLE_ENDIAN__
  67534. #define vcopy_lane_f64(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
  67535. float64x1_t __s0_290 = __p0_290; \
  67536. float64x1_t __s2_290 = __p2_290; \
  67537. float64x1_t __ret_290; \
  67538. __ret_290 = vset_lane_f64(vget_lane_f64(__s2_290, __p3_290), __s0_290, __p1_290); \
  67539. __ret_290; \
  67540. })
  67541. #else
  67542. #define vcopy_lane_f64(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
  67543. float64x1_t __s0_291 = __p0_291; \
  67544. float64x1_t __s2_291 = __p2_291; \
  67545. float64x1_t __ret_291; \
  67546. __ret_291 = __noswap_vset_lane_f64(__noswap_vget_lane_f64(__s2_291, __p3_291), __s0_291, __p1_291); \
  67547. __ret_291; \
  67548. })
  67549. #endif
  67550. #ifdef __LITTLE_ENDIAN__
  67551. #define vcopyq_laneq_p64(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
  67552. poly64x2_t __s0_292 = __p0_292; \
  67553. poly64x2_t __s2_292 = __p2_292; \
  67554. poly64x2_t __ret_292; \
  67555. __ret_292 = vsetq_lane_p64(vgetq_lane_p64(__s2_292, __p3_292), __s0_292, __p1_292); \
  67556. __ret_292; \
  67557. })
  67558. #else
  67559. #define vcopyq_laneq_p64(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \
  67560. poly64x2_t __s0_293 = __p0_293; \
  67561. poly64x2_t __s2_293 = __p2_293; \
  67562. poly64x2_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 1, 0); \
  67563. poly64x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \
  67564. poly64x2_t __ret_293; \
  67565. __ret_293 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_293, __p3_293), __rev0_293, __p1_293); \
  67566. __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 1, 0); \
  67567. __ret_293; \
  67568. })
  67569. #endif
  67570. #ifdef __LITTLE_ENDIAN__
  67571. #define vcopyq_laneq_f64(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
  67572. float64x2_t __s0_294 = __p0_294; \
  67573. float64x2_t __s2_294 = __p2_294; \
  67574. float64x2_t __ret_294; \
  67575. __ret_294 = vsetq_lane_f64(vgetq_lane_f64(__s2_294, __p3_294), __s0_294, __p1_294); \
  67576. __ret_294; \
  67577. })
  67578. #else
  67579. #define vcopyq_laneq_f64(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
  67580. float64x2_t __s0_295 = __p0_295; \
  67581. float64x2_t __s2_295 = __p2_295; \
  67582. float64x2_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 1, 0); \
  67583. float64x2_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \
  67584. float64x2_t __ret_295; \
  67585. __ret_295 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_295, __p3_295), __rev0_295, __p1_295); \
  67586. __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 1, 0); \
  67587. __ret_295; \
  67588. })
  67589. #endif
  67590. #ifdef __LITTLE_ENDIAN__
  67591. #define vcopy_laneq_p64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
  67592. poly64x1_t __s0_296 = __p0_296; \
  67593. poly64x2_t __s2_296 = __p2_296; \
  67594. poly64x1_t __ret_296; \
  67595. __ret_296 = vset_lane_p64(vgetq_lane_p64(__s2_296, __p3_296), __s0_296, __p1_296); \
  67596. __ret_296; \
  67597. })
  67598. #else
  67599. #define vcopy_laneq_p64(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
  67600. poly64x1_t __s0_297 = __p0_297; \
  67601. poly64x2_t __s2_297 = __p2_297; \
  67602. poly64x2_t __rev2_297; __rev2_297 = __builtin_shufflevector(__s2_297, __s2_297, 1, 0); \
  67603. poly64x1_t __ret_297; \
  67604. __ret_297 = __noswap_vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_297, __p3_297), __s0_297, __p1_297); \
  67605. __ret_297; \
  67606. })
  67607. #endif
  67608. #ifdef __LITTLE_ENDIAN__
  67609. #define vcopy_laneq_f64(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
  67610. float64x1_t __s0_298 = __p0_298; \
  67611. float64x2_t __s2_298 = __p2_298; \
  67612. float64x1_t __ret_298; \
  67613. __ret_298 = vset_lane_f64(vgetq_lane_f64(__s2_298, __p3_298), __s0_298, __p1_298); \
  67614. __ret_298; \
  67615. })
  67616. #else
  67617. #define vcopy_laneq_f64(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
  67618. float64x1_t __s0_299 = __p0_299; \
  67619. float64x2_t __s2_299 = __p2_299; \
  67620. float64x2_t __rev2_299; __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 1, 0); \
  67621. float64x1_t __ret_299; \
  67622. __ret_299 = __noswap_vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_299, __p3_299), __s0_299, __p1_299); \
  67623. __ret_299; \
  67624. })
  67625. #endif
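/* vmlal_high_u8/u32/u16/s8/s32/s16: widening multiply-accumulate using the
 * upper halves of the 128-bit multiplicands, i.e.
 * vmlal_high_u8(acc, a, b) is vmlal_u8(acc, vget_high_u8(a), vget_high_u8(b)).
 * Illustrative use (not part of this header; the variable names are just
 * examples) accumulating the products of all 16 byte lanes:
 *   uint16x8_t sum = vmlal_u8(acc, vget_low_u8(a), vget_low_u8(b));
 *   sum = vmlal_high_u8(sum, a, b);
 */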
  67626. #ifdef __LITTLE_ENDIAN__
  67627. __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  67628. uint16x8_t __ret;
  67629. __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
  67630. return __ret;
  67631. }
  67632. #else
  67633. __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
  67634. uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  67635. uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67636. uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67637. uint16x8_t __ret;
  67638. __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
  67639. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  67640. return __ret;
  67641. }
  67642. #endif
  67643. #ifdef __LITTLE_ENDIAN__
  67644. __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  67645. uint64x2_t __ret;
  67646. __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
  67647. return __ret;
  67648. }
  67649. #else
  67650. __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
  67651. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  67652. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67653. uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  67654. uint64x2_t __ret;
  67655. __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
  67656. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67657. return __ret;
  67658. }
  67659. #endif
  67660. #ifdef __LITTLE_ENDIAN__
  67661. __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  67662. uint32x4_t __ret;
  67663. __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
  67664. return __ret;
  67665. }
  67666. #else
  67667. __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
  67668. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67669. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67670. uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  67671. uint32x4_t __ret;
  67672. __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
  67673. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67674. return __ret;
  67675. }
  67676. #endif
  67677. #ifdef __LITTLE_ENDIAN__
  67678. __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  67679. int16x8_t __ret;
  67680. __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
  67681. return __ret;
  67682. }
  67683. #else
  67684. __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
  67685. int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
  67686. int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67687. int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  67688. int16x8_t __ret;
  67689. __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
  67690. __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
  67691. return __ret;
  67692. }
  67693. #endif
  67694. #ifdef __LITTLE_ENDIAN__
  67695. __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  67696. int64x2_t __ret;
  67697. __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
  67698. return __ret;
  67699. }
  67700. #else
  67701. __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
  67702. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  67703. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67704. int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
  67705. int64x2_t __ret;
  67706. __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
  67707. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67708. return __ret;
  67709. }
  67710. #endif
  67711. #ifdef __LITTLE_ENDIAN__
  67712. __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  67713. int32x4_t __ret;
  67714. __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
  67715. return __ret;
  67716. }
  67717. #else
  67718. __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
  67719. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67720. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67721. int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
  67722. int32x4_t __ret;
  67723. __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
  67724. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67725. return __ret;
  67726. }
  67727. #endif
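/* vmlal_high_n_u32/u16/s32/s16: widening multiply-accumulate of the upper
 * half of a 128-bit vector by a scalar: vmlal_n(__p0, vget_high(__p1), __p2).
 */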
  67728. #ifdef __LITTLE_ENDIAN__
  67729. __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
  67730. uint64x2_t __ret;
  67731. __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
  67732. return __ret;
  67733. }
  67734. #else
  67735. __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
  67736. uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  67737. uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67738. uint64x2_t __ret;
  67739. __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
  67740. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67741. return __ret;
  67742. }
  67743. #endif
  67744. #ifdef __LITTLE_ENDIAN__
  67745. __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
  67746. uint32x4_t __ret;
  67747. __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
  67748. return __ret;
  67749. }
  67750. #else
  67751. __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
  67752. uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67753. uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67754. uint32x4_t __ret;
  67755. __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
  67756. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67757. return __ret;
  67758. }
  67759. #endif
  67760. #ifdef __LITTLE_ENDIAN__
  67761. __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  67762. int64x2_t __ret;
  67763. __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
  67764. return __ret;
  67765. }
  67766. #else
  67767. __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
  67768. int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
  67769. int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
  67770. int64x2_t __ret;
  67771. __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
  67772. __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
  67773. return __ret;
  67774. }
  67775. #endif
  67776. #ifdef __LITTLE_ENDIAN__
  67777. __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  67778. int32x4_t __ret;
  67779. __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
  67780. return __ret;
  67781. }
  67782. #else
  67783. __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
  67784. int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
  67785. int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
  67786. int32x4_t __ret;
  67787. __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
  67788. __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
  67789. return __ret;
  67790. }
  67791. #endif
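/*
 * Editorial note (not part of the generated intrinsics): the vmlal_high_* and
 * vmlal_high_n_* wrappers above accumulate the widened product of the HIGH
 * half of each 128-bit source into the accumulator. A full 8-lane 16x16->32
 * multiply-accumulate is therefore usually written as a vmlal_s16 on the low
 * halves plus a vmlal_high_s16 on the high halves, as in the sketch below.
 * The helper name mac_widen_s16 is invented for illustration only.
 *
 *   static inline void mac_widen_s16(int32x4_t *acc_lo, int32x4_t *acc_hi,
 *                                    int16x8_t a, int16x8_t b) {
 *     // lanes 0..3: *acc_lo += (int32_t)a[i] * (int32_t)b[i]
 *     *acc_lo = vmlal_s16(*acc_lo, vget_low_s16(a), vget_low_s16(b));
 *     // lanes 4..7: *acc_hi += (int32_t)a[i] * (int32_t)b[i]
 *     *acc_hi = vmlal_high_s16(*acc_hi, a, b);
 *   }
 */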
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
uint16x8_t __ret;
__ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
return __ret;
}
#else
__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint64x2_t __ret;
__ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
return __ret;
}
#else
__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
uint64x2_t __ret;
__ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
uint32x4_t __ret;
__ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
return __ret;
}
#else
__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
int16x8_t __ret;
__ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
return __ret;
}
#else
__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
int64x2_t __ret;
__ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
return __ret;
}
#else
__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
int64x2_t __ret;
__ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
int32x4_t __ret;
__ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
return __ret;
}
#else
__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
int32x4_t __ret;
__ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
uint64x2_t __ret;
__ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
return __ret;
}
#else
__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint64x2_t __ret;
__ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
uint32x4_t __ret;
__ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
return __ret;
}
#else
__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
int64x2_t __ret;
__ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
return __ret;
}
#else
__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int64x2_t __ret;
__ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
int32x4_t __ret;
__ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
return __ret;
}
#else
__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int32x4_t __ret;
__ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
#define vmulx_lane_f64(__p0_300, __p1_300, __p2_300) __extension__ ({ \
float64x1_t __s0_300 = __p0_300; \
float64x1_t __s1_300 = __p1_300; \
float64x1_t __ret_300; \
float64_t __x_300 = vget_lane_f64(__s0_300, 0); \
float64_t __y_300 = vget_lane_f64(__s1_300, __p2_300); \
float64_t __z_300 = vmulxd_f64(__x_300, __y_300); \
__ret_300 = vset_lane_f64(__z_300, __s0_300, __p2_300); \
__ret_300; \
})
#else
#define vmulx_lane_f64(__p0_301, __p1_301, __p2_301) __extension__ ({ \
float64x1_t __s0_301 = __p0_301; \
float64x1_t __s1_301 = __p1_301; \
float64x1_t __ret_301; \
float64_t __x_301 = __noswap_vget_lane_f64(__s0_301, 0); \
float64_t __y_301 = __noswap_vget_lane_f64(__s1_301, __p2_301); \
float64_t __z_301 = __noswap_vmulxd_f64(__x_301, __y_301); \
__ret_301 = __noswap_vset_lane_f64(__z_301, __s0_301, __p2_301); \
__ret_301; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vmulx_laneq_f64(__p0_302, __p1_302, __p2_302) __extension__ ({ \
float64x1_t __s0_302 = __p0_302; \
float64x2_t __s1_302 = __p1_302; \
float64x1_t __ret_302; \
float64_t __x_302 = vget_lane_f64(__s0_302, 0); \
float64_t __y_302 = vgetq_lane_f64(__s1_302, __p2_302); \
float64_t __z_302 = vmulxd_f64(__x_302, __y_302); \
__ret_302 = vset_lane_f64(__z_302, __s0_302, 0); \
__ret_302; \
})
#else
#define vmulx_laneq_f64(__p0_303, __p1_303, __p2_303) __extension__ ({ \
float64x1_t __s0_303 = __p0_303; \
float64x2_t __s1_303 = __p1_303; \
float64x2_t __rev1_303; __rev1_303 = __builtin_shufflevector(__s1_303, __s1_303, 1, 0); \
float64x1_t __ret_303; \
float64_t __x_303 = __noswap_vget_lane_f64(__s0_303, 0); \
float64_t __y_303 = __noswap_vgetq_lane_f64(__rev1_303, __p2_303); \
float64_t __z_303 = __noswap_vmulxd_f64(__x_303, __y_303); \
__ret_303 = __noswap_vset_lane_f64(__z_303, __s0_303, 0); \
__ret_303; \
})
#endif
#endif
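/*
 * Editorial note (not part of the generated intrinsics): vmulx_lane_f64 and
 * vmulx_laneq_f64 above map to the AArch64 FMULX instruction applied to one
 * lane of the second operand. FMULX behaves like an ordinary multiply except
 * that 0 * infinity yields +/-2.0 instead of NaN, which is what the
 * reciprocal and reciprocal-square-root refinement sequences expect. A
 * minimal usage sketch, with made-up values:
 *
 *   float64x1_t a = vdup_n_f64(3.0);
 *   float64x2_t b = vsetq_lane_f64(4.0, vdupq_n_f64(0.5), 1); // b = {0.5, 4.0}
 *   float64x1_t r0 = vmulx_laneq_f64(a, b, 0);  // 3.0 * 0.5 -> 1.5
 *   float64x1_t r1 = vmulx_laneq_f64(a, b, 1);  // 3.0 * 4.0 -> 12.0
 */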
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
uint16x8_t __ret;
__ret = __p0 + vabdl_u8(__p1, __p2);
return __ret;
}
#else
__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
uint16x8_t __ret;
__ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
uint64x2_t __ret;
__ret = __p0 + vabdl_u32(__p1, __p2);
return __ret;
}
#else
__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
uint64x2_t __ret;
__ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
uint64x2_t __ret;
__ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
uint32x4_t __ret;
__ret = __p0 + vabdl_u16(__p1, __p2);
return __ret;
}
#else
__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
uint32x4_t __ret;
__ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
int16x8_t __ret;
__ret = __p0 + vabdl_s8(__p1, __p2);
return __ret;
}
#else
__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
int16x8_t __ret;
__ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
int64x2_t __ret;
__ret = __p0 + vabdl_s32(__p1, __p2);
return __ret;
}
#else
__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
int64x2_t __ret;
__ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
int64x2_t __ret;
__ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
int32x4_t __ret;
__ret = __p0 + vabdl_s16(__p1, __p2);
return __ret;
}
#else
__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
int32x4_t __ret;
__ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
int32x4_t __ret;
__ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
return __ret;
}
#endif
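/*
 * Editorial note (not part of the generated intrinsics): the vabal_* wrappers
 * above compute acc += |p1 - p2| with the absolute difference widened to the
 * accumulator's element size, the usual building block of a
 * sum-of-absolute-differences kernel. A minimal sketch; the helper name
 * sad8_u8 is invented for illustration only.
 *
 *   static inline uint16x8_t sad8_u8(uint16x8_t acc,
 *                                    const uint8_t *a, const uint8_t *b) {
 *     // acc[i] += |a[i] - b[i]| for eight bytes
 *     return vabal_u8(acc, vld1_u8(a), vld1_u8(b));
 *   }
 */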
#if defined(__aarch64__)
#ifdef __LITTLE_ENDIAN__
__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
uint16x8_t __ret;
__ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
return __ret;
}
#else
__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __ret;
__ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint64x2_t __ret;
__ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
return __ret;
}
#else
__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
uint64x2_t __ret;
__ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
uint32x4_t __ret;
__ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
return __ret;
}
#else
__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
uint32x4_t __ret;
__ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
int16x8_t __ret;
__ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
return __ret;
}
#else
__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __ret;
__ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
int64x2_t __ret;
__ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
return __ret;
}
#else
__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
int64x2_t __ret;
__ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 1, 0);
return __ret;
}
#endif
#ifdef __LITTLE_ENDIAN__
__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
int32x4_t __ret;
__ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
return __ret;
}
#else
__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
int32x4_t __ret;
__ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
return __ret;
}
#endif
#endif
#undef __ai
#endif /* __ARM_NEON_H */